VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@87606

Last change on this file since 87606 was 87594, checked in by vboxsync, 4 years ago

VMM/DBGF,Debugger: Removed the !defined(VBOX_WITH_LOTS_OF_DBGF_BPS) code. bugref:9837

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 98.7 KB
1/* $Id: VMMR0.cpp 87594 2021-02-03 20:23:46Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_NEM_R0
31# include <VBox/vmm/nem.h>
32#endif
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/stam.h>
35#include <VBox/vmm/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvm.h>
39#ifdef VBOX_WITH_PCI_PASSTHROUGH
40# include <VBox/vmm/pdmpci.h>
41#endif
42#include <VBox/vmm/apic.h>
43
44#include <VBox/vmm/gvmm.h>
45#include <VBox/vmm/gmm.h>
46#include <VBox/vmm/gim.h>
47#include <VBox/intnet.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/param.h>
50#include <VBox/err.h>
51#include <VBox/version.h>
52#include <VBox/log.h>
53
54#include <iprt/asm-amd64-x86.h>
55#include <iprt/assert.h>
56#include <iprt/crc.h>
57#include <iprt/mp.h>
58#include <iprt/once.h>
59#include <iprt/stdarg.h>
60#include <iprt/string.h>
61#include <iprt/thread.h>
62#include <iprt/timer.h>
63#include <iprt/time.h>
64
65#include "dtrace/VBoxVMM.h"
66
67
68#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
69# pragma intrinsic(_AddressOfReturnAddress)
70#endif
71
72#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
73# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
74#endif
75
76
77
78/*********************************************************************************************************************************
79* Defined Constants And Macros *
80*********************************************************************************************************************************/
81/** @def VMM_CHECK_SMAP_SETUP
82 * SMAP check setup. */
83/** @def VMM_CHECK_SMAP_CHECK
84 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
85 * it will be logged and @a a_BadExpr is executed. */
86/** @def VMM_CHECK_SMAP_CHECK2
87 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
88 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
89 * executed. */
90#if (defined(VBOX_STRICT) || 1) && !defined(VBOX_WITH_RAM_IN_KERNEL)
91# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
92# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
93 do { \
94 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
95 { \
96 RTCCUINTREG fEflCheck = ASMGetFlags(); \
97 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
98 { /* likely */ } \
99 else \
100 { \
101 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
102 a_BadExpr; \
103 } \
104 } \
105 } while (0)
106# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
107 do { \
108 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
109 { \
110 RTCCUINTREG fEflCheck = ASMGetFlags(); \
111 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
112 { /* likely */ } \
113 else if (a_pGVM) \
114 { \
115 SUPR0BadContext((a_pGVM)->pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
116 RTStrPrintf((a_pGVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pGVM)->vmm.s.szRing0AssertMsg1), \
117 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
118 a_BadExpr; \
119 } \
120 else \
121 { \
122 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
123 a_BadExpr; \
124 } \
125 } \
126 } while (0)
127#else
128# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
129# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
130# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures)
131#endif
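/* A minimal usage sketch (not part of the original file; the function and
   callee names are hypothetical): the canonical pattern, as used by
   ModuleInit() and vmmR0InitVM() below, is one SETUP at function entry
   followed by a CHECK after each call that might have clobbered EFLAGS.AC. */
#if 0 /* illustrative only */
static int vmmR0HypotheticalWorker(PGVM pGVM)
{
    VMM_CHECK_SMAP_SETUP();                 /* caches SUPR0GetKernelFeatures() once */
    VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
    int rc = vmmR0HypotheticalCallee(pGVM); /* hypothetical callee */
    /* The 2-variant also records the failure in the VM's assertion text buffer: */
    VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
    return rc;
}
#endif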
132
133
134/*********************************************************************************************************************************
135* Internal Functions *
136*********************************************************************************************************************************/
137RT_C_DECLS_BEGIN
138#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
139extern uint64_t __udivdi3(uint64_t, uint64_t);
140extern uint64_t __umoddi3(uint64_t, uint64_t);
141#endif
142RT_C_DECLS_END
143
144
145/*********************************************************************************************************************************
146* Global Variables *
147*********************************************************************************************************************************/
148/** Drag in necessary library bits.
149 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
150struct CLANG11WEIRDNOTHROW { PFNRT pfn; } g_VMMR0Deps[] =
151{
152 { (PFNRT)RTCrc32 },
153 { (PFNRT)RTOnce },
154#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
155 { (PFNRT)__udivdi3 },
156 { (PFNRT)__umoddi3 },
157#endif
158 { NULL }
159};
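/* Note: taking the address of each function above forces the linker to pull
   the corresponding runtime bits into VMMR0.r0 even though nothing in this
   file calls them directly. */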
160
161#ifdef RT_OS_SOLARIS
162/* Dependency information for the native solaris loader. */
163extern "C" { char _depends_on[] = "vboxdrv"; }
164#endif
165
166
167/**
168 * Initialize the module.
169 * This is called when we're first loaded.
170 *
171 * @returns 0 on success.
172 * @returns VBox status on failure.
173 * @param hMod Image handle for use in APIs.
174 */
175DECLEXPORT(int) ModuleInit(void *hMod)
176{
177 VMM_CHECK_SMAP_SETUP();
178 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
179
180#ifdef VBOX_WITH_DTRACE_R0
181 /*
182 * The first thing to do is register the static tracepoints.
183 * (Deregistration is automatic.)
184 */
185 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
186 if (RT_FAILURE(rc2))
187 return rc2;
188#endif
189 LogFlow(("ModuleInit:\n"));
190
191#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
192 /*
193 * Display the CMOS debug code.
194 */
195 ASMOutU8(0x72, 0x03);
196 uint8_t bDebugCode = ASMInU8(0x73);
197 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
198 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
199#endif
200
201 /*
202 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
203 */
204 int rc = vmmInitFormatTypes();
205 if (RT_SUCCESS(rc))
206 {
207 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
208 rc = GVMMR0Init();
209 if (RT_SUCCESS(rc))
210 {
211 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
212 rc = GMMR0Init();
213 if (RT_SUCCESS(rc))
214 {
215 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
216 rc = HMR0Init();
217 if (RT_SUCCESS(rc))
218 {
219 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
220
221 PDMR0Init(hMod);
222 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
223
224 rc = PGMRegisterStringFormatTypes();
225 if (RT_SUCCESS(rc))
226 {
227 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
228#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
229 rc = PGMR0DynMapInit();
230#endif
231 if (RT_SUCCESS(rc))
232 {
233 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
234 rc = IntNetR0Init();
235 if (RT_SUCCESS(rc))
236 {
237#ifdef VBOX_WITH_PCI_PASSTHROUGH
238 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
239 rc = PciRawR0Init();
240#endif
241 if (RT_SUCCESS(rc))
242 {
243 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
244 rc = CPUMR0ModuleInit();
245 if (RT_SUCCESS(rc))
246 {
247#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
248 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
249 rc = vmmR0TripleFaultHackInit();
250 if (RT_SUCCESS(rc))
251#endif
252 {
253 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
254 if (RT_SUCCESS(rc))
255 {
256 LogFlow(("ModuleInit: returns success\n"));
257 return VINF_SUCCESS;
258 }
259 }
260
261 /*
262 * Bail out.
263 */
264#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
265 vmmR0TripleFaultHackTerm();
266#endif
267 }
268 else
269 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
270#ifdef VBOX_WITH_PCI_PASSTHROUGH
271 PciRawR0Term();
272#endif
273 }
274 else
275 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
276 IntNetR0Term();
277 }
278 else
279 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
280#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
281 PGMR0DynMapTerm();
282#endif
283 }
284 else
285 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
286 PGMDeregisterStringFormatTypes();
287 }
288 else
289 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
290 HMR0Term();
291 }
292 else
293 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
294 GMMR0Term();
295 }
296 else
297 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
298 GVMMR0Term();
299 }
300 else
301 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
302 vmmTermFormatTypes();
303 }
304 else
305 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
306
307 LogFlow(("ModuleInit: failed %Rrc\n", rc));
308 return rc;
309}
310
311
312/**
313 * Terminate the module.
314 * This is called when we're finally unloaded.
315 *
316 * @param hMod Image handle for use in APIs.
317 */
318DECLEXPORT(void) ModuleTerm(void *hMod)
319{
320 NOREF(hMod);
321 LogFlow(("ModuleTerm:\n"));
322
323 /*
324 * Terminate the CPUM module (Local APIC cleanup).
325 */
326 CPUMR0ModuleTerm();
327
328 /*
329 * Terminate the internal network service.
330 */
331 IntNetR0Term();
332
333 /*
334 * PGM (Darwin), HM and PciRaw global cleanup.
335 */
336#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
337 PGMR0DynMapTerm();
338#endif
339#ifdef VBOX_WITH_PCI_PASSTHROUGH
340 PciRawR0Term();
341#endif
342 PGMDeregisterStringFormatTypes();
343 HMR0Term();
344#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
345 vmmR0TripleFaultHackTerm();
346#endif
347
348 /*
349 * Destroy the GMM and GVMM instances.
350 */
351 GMMR0Term();
352 GVMMR0Term();
353
354 vmmTermFormatTypes();
355
356 LogFlow(("ModuleTerm: returns\n"));
357}
358
359
360/**
361 * Initiates the R0 driver for a particular VM instance.
362 *
363 * @returns VBox status code.
364 *
365 * @param pGVM The global (ring-0) VM structure.
366 * @param uSvnRev The SVN revision of the ring-3 part.
367 * @param uBuildType Build type indicator.
368 * @thread EMT(0)
369 */
370static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
371{
372 VMM_CHECK_SMAP_SETUP();
373 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
374
375 /*
376 * Match the SVN revisions and build type.
377 */
378 if (uSvnRev != VMMGetSvnRev())
379 {
380 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
381 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
382 return VERR_VMM_R0_VERSION_MISMATCH;
383 }
384 if (uBuildType != vmmGetBuildType())
385 {
386 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
387 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
388 return VERR_VMM_R0_VERSION_MISMATCH;
389 }
390
391 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
392 if (RT_FAILURE(rc))
393 return rc;
394
395#ifdef LOG_ENABLED
396 /*
397 * Register the EMT R0 logger instance for VCPU 0.
398 */
399 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
400
401 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
402 if (pR0Logger)
403 {
404# if 0 /* testing of the logger. */
405 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
406 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
407 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
408 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
409
410 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
411 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
412 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
413 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
414
415 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
416 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
417 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
418 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
419
420 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
421 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
422 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
423 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
424 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
425 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
426
427 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
428 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
429
430 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
431 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
432 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
433# endif
434 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pGVM->pSession));
435 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
436 pR0Logger->fRegistered = true;
437 }
438#endif /* LOG_ENABLED */
439
440 /*
441 * Check if the host supports high resolution timers or not.
442 */
443 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
444 && !RTTimerCanDoHighResolution())
445 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
446
447 /*
448 * Initialize the per VM data for GVMM and GMM.
449 */
450 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
451 rc = GVMMR0InitVM(pGVM);
452 if (RT_SUCCESS(rc))
453 {
454 /*
455 * Init HM, CPUM and PGM (Darwin only).
456 */
457 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
458 rc = HMR0InitVM(pGVM);
459 if (RT_SUCCESS(rc))
460 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */
461 if (RT_SUCCESS(rc))
462 {
463 rc = CPUMR0InitVM(pGVM);
464 if (RT_SUCCESS(rc))
465 {
466 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
467 rc = PGMR0InitVM(pGVM);
468 if (RT_SUCCESS(rc))
469 {
470 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
471 rc = EMR0InitVM(pGVM);
472 if (RT_SUCCESS(rc))
473 {
474 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
475#ifdef VBOX_WITH_PCI_PASSTHROUGH
476 rc = PciRawR0InitVM(pGVM);
477#endif
478 if (RT_SUCCESS(rc))
479 {
480 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
481 rc = GIMR0InitVM(pGVM);
482 if (RT_SUCCESS(rc))
483 {
484 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);
485 if (RT_SUCCESS(rc))
486 {
487 GVMMR0DoneInitVM(pGVM);
488
489 /*
490 * Collect a bit of info for the VM release log.
491 */
492 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
493 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
494
495 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
496 return rc;
497 }
498
499 /* bail out */
500 GIMR0TermVM(pGVM);
501 }
502#ifdef VBOX_WITH_PCI_PASSTHROUGH
503 PciRawR0TermVM(pGVM);
504#endif
505 }
506 }
507 }
508 }
509 HMR0TermVM(pGVM);
510 }
511 }
512
513 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
514 return rc;
515}
516
517
518/**
519 * Does EMT specific VM initialization.
520 *
521 * @returns VBox status code.
522 * @param pGVM The ring-0 VM structure.
523 * @param idCpu The EMT that's calling.
524 */
525static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
526{
527 /* Paranoia (caller checked these already). */
528 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
529 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
530
531#ifdef LOG_ENABLED
532 /*
533 * Registration of ring 0 loggers.
534 */
535 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
536 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
537 if ( pR0Logger
538 && !pR0Logger->fRegistered)
539 {
540 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
541 pR0Logger->fRegistered = true;
542 }
543#endif
544
545 return VINF_SUCCESS;
546}
547
548
549
550/**
551 * Terminates the R0 bits for a particular VM instance.
552 *
553 * This is normally called by ring-3 as part of the VM termination process, but
554 * may alternatively be called during the support driver session cleanup when
555 * the VM object is destroyed (see GVMM).
556 *
557 * @returns VBox status code.
558 *
559 * @param pGVM The global (ring-0) VM structure.
560 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
561 * thread.
562 * @thread EMT(0) or session cleanup thread.
563 */
564VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
565{
566 /*
567 * Check EMT(0) claim if we're called from userland.
568 */
569 if (idCpu != NIL_VMCPUID)
570 {
571 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
572 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
573 if (RT_FAILURE(rc))
574 return rc;
575 }
576
577#ifdef VBOX_WITH_PCI_PASSTHROUGH
578 PciRawR0TermVM(pGVM);
579#endif
580
581 /*
582 * Tell GVMM what we're up to and check that we only do this once.
583 */
584 if (GVMMR0DoingTermVM(pGVM))
585 {
586 GIMR0TermVM(pGVM);
587
588 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
589 * here to make sure we don't leak any shared pages if we crash... */
590#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
591 PGMR0DynMapTermVM(pGVM);
592#endif
593 HMR0TermVM(pGVM);
594 }
595
596 /*
597 * Deregister the logger.
598 */
599 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
600 return VINF_SUCCESS;
601}
602
603
604/**
605 * An interrupt or unhalt force flag is set, deal with it.
606 *
607 * @returns VINF_SUCCESS (or VINF_EM_HALT).
608 * @param pVCpu The cross context virtual CPU structure.
609 * @param uMWait Result from EMMonitorWaitIsActive().
610 * @param enmInterruptibility Guest CPU interruptibility level.
611 */
612static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
613{
614 Assert(!TRPMHasTrap(pVCpu));
615 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
616 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
617
618 /*
619 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
620 */
621 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
622 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
623 {
624 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
625 {
626 uint8_t u8Interrupt = 0;
627 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
628 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
629 if (RT_SUCCESS(rc))
630 {
631 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
632
633 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
634 AssertRCSuccess(rc);
635 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
636 return rc;
637 }
638 }
639 }
640 /*
641 * SMI is not implemented yet, at least not here.
642 */
643 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
644 {
645 return VINF_EM_HALT;
646 }
647 /*
648 * NMI.
649 */
650 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
651 {
652 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
653 {
654 /** @todo later. */
655 return VINF_EM_HALT;
656 }
657 }
658 /*
659 * Nested-guest virtual interrupt.
660 */
661 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
662 {
663 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
664 {
665 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
666 * here before injecting the virtual interrupt. See emR3ForcedActions
667 * for details. */
668 return VINF_EM_HALT;
669 }
670 }
671
672 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
673 {
674 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
675 return VINF_SUCCESS;
676 }
677 if (uMWait > 1)
678 {
679 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
680 return VINF_SUCCESS;
681 }
682
683 return VINF_EM_HALT;
684}
685
686
687/**
688 * This does one round of vmR3HaltGlobal1Halt().
689 *
690 * The rationale here is that we'll reduce latency in interrupt situations if we
691 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
692 * MWAIT), but do one round of blocking here instead and hope the interrupt is
693 * raised in the meanwhile.
694 *
695 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
696 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
697 * ring-0 call (unless we're too close to a timer event). When the interrupt
698 * wakes us up, we'll return from ring-0 and EM will by instinct do a
699 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
700 * back to VMMR0EntryFast().
701 *
702 * @returns VINF_SUCCESS or VINF_EM_HALT.
703 * @param pGVM The ring-0 VM structure.
704 * @param pGVCpu The ring-0 virtual CPU structure.
705 *
706 * @todo r=bird: All the blocking/waiting and EMT management should move out of
707 * the VM module, probably to VMM. Then this would be more weird wrt
708 * parameters and statistics.
709 */
710static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
711{
712 /*
713 * Do spin stat historization.
714 */
715 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
716 { /* likely */ }
717 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
718 {
719 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
720 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
721 }
722 else
723 {
724 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
725 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
726 }
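 /* Note on the historization above: the "& 0xff" means the likely path is
    taken 255 times out of 256; on every 256th halt the two counters are
    re-seeded so that whichever outcome dominated the last window (halting
    in ring-0 vs. going to ring-3) starts the next window with a bias of 2. */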
727
728 /*
729 * Flags that make us go to ring-3.
730 */
731 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
732 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
733 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
734 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
735 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
736 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
737 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
738 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
739
740 /*
741 * Check preconditions.
742 */
743 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
744 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
745 if ( pGVCpu->vmm.s.fMayHaltInRing0
746 && !TRPMHasTrap(pGVCpu)
747 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
748 || uMWait > 1))
749 {
750 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
751 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
752 {
753 /*
754 * Interrupts pending already?
755 */
756 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
757 APICUpdatePendingInterrupts(pGVCpu);
758
759 /*
760 * Flags that wake up from the halted state.
761 */
762 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
763 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
764
765 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
766 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
767 ASMNopPause();
768
769 /*
770 * Check out how long till the next timer event.
771 */
772 uint64_t u64Delta;
773 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
774
775 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
776 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
777 {
778 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
779 APICUpdatePendingInterrupts(pGVCpu);
780
781 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
782 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
783
784 /*
785 * Wait if there is enough time to the next timer event.
786 */
787 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
788 {
789 /* If there are few other CPU cores around, we will procrastinate a
790 little before going to sleep, hoping for some device raising an
791 interrupt or similar. Though, the best thing here would be to
792 dynamically adjust the spin count according to its usefulness or
793 something... */
794 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
795 && RTMpGetOnlineCount() >= 4)
796 {
797 /** @todo Figure out how we can skip this if it hasn't helped recently...
798 * @bugref{9172#c12} */
799 uint32_t cSpinLoops = 42;
800 while (cSpinLoops-- > 0)
801 {
802 ASMNopPause();
803 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
804 APICUpdatePendingInterrupts(pGVCpu);
805 ASMNopPause();
806 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
807 {
808 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
809 return VINF_EM_HALT;
810 }
811 ASMNopPause();
812 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
813 {
814 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
815 return VINF_EM_HALT;
816 }
817 ASMNopPause();
818 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
819 {
820 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
821 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
822 }
823 ASMNopPause();
824 }
825 }
826
827 /* Block. We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
828 knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here). */
829 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);
830 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
831 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
832 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
833 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
834 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
835 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
836 if ( rc == VINF_SUCCESS
837 || rc == VERR_INTERRUPTED)
838 {
839
840 /* Keep some stats like ring-3 does. */
841 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
842 if (cNsOverslept > 50000)
843 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
844 else if (cNsOverslept < -50000)
845 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
846 else
847 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
848
849 /*
850 * Recheck whether we can resume execution or have to go to ring-3.
851 */
852 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
853 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
854 {
855 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
856 APICUpdatePendingInterrupts(pGVCpu);
857 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
858 {
859 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
860 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
861 }
862 }
863 }
864 }
865 }
866 }
867 }
868 return VINF_EM_HALT;
869}
870
871
872/**
873 * VMM ring-0 thread-context callback.
874 *
875 * This does common HM state updating and calls the HM-specific thread-context
876 * callback.
877 *
878 * @param enmEvent The thread-context event.
879 * @param pvUser Opaque pointer to the VMCPU.
880 *
881 * @thread EMT(pvUser)
882 */
883static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
884{
885 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
886
887 switch (enmEvent)
888 {
889 case RTTHREADCTXEVENT_IN:
890 {
891 /*
892 * Linux may call us with preemption enabled (really!) but technically we
893 * cannot get preempted here, otherwise we end up in an infinite recursion
894 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
895 * ad infinitum). Let's just disable preemption for now...
896 */
897 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
898 * preemption after doing the callout (one or two functions up the
899 * call chain). */
900 /** @todo r=ramshankar: See @bugref{5313#c30}. */
901 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
902 RTThreadPreemptDisable(&ParanoidPreemptState);
903
904 /* We need to update the VCPU <-> host CPU mapping. */
905 RTCPUID idHostCpu;
906 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
907 pVCpu->iHostCpuSet = iHostCpuSet;
908 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
909
910 /* In the very unlikely event that the GIP delta for the CPU we're
911 rescheduled onto needs calculating, try to force a return to ring-3.
912 We unfortunately cannot do the measurements right here. */
913 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
914 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
915
916 /* Invoke the HM-specific thread-context callback. */
917 HMR0ThreadCtxCallback(enmEvent, pvUser);
918
919 /* Restore preemption. */
920 RTThreadPreemptRestore(&ParanoidPreemptState);
921 break;
922 }
923
924 case RTTHREADCTXEVENT_OUT:
925 {
926 /* Invoke the HM-specific thread-context callback. */
927 HMR0ThreadCtxCallback(enmEvent, pvUser);
928
929 /*
930 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
931 * have the same host CPU associated with it.
932 */
933 pVCpu->iHostCpuSet = UINT32_MAX;
934 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
935 break;
936 }
937
938 default:
939 /* Invoke the HM-specific thread-context callback. */
940 HMR0ThreadCtxCallback(enmEvent, pvUser);
941 break;
942 }
943}
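/* Note: RTTHREADCTXEVENT_IN fires when the EMT is scheduled back onto a host
   CPU, RTTHREADCTXEVENT_OUT when it is scheduled off it; hence the VCPU <->
   host CPU mapping is established in the IN case and invalidated in the OUT
   case above. */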
944
945
946/**
947 * Creates thread switching hook for the current EMT thread.
948 *
949 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
950 * platform does not implement switcher hooks, no hooks will be created and the
951 * member set to NIL_RTTHREADCTXHOOK.
952 *
953 * @returns VBox status code.
954 * @param pVCpu The cross context virtual CPU structure.
955 * @thread EMT(pVCpu)
956 */
957VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
958{
959 VMCPU_ASSERT_EMT(pVCpu);
960 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
961
962#if 1 /* To disable this stuff change to zero. */
963 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
964 if (RT_SUCCESS(rc))
965 return rc;
966#else
967 RT_NOREF(vmmR0ThreadCtxCallback);
968 int rc = VERR_NOT_SUPPORTED;
969#endif
970
971 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
972 if (rc == VERR_NOT_SUPPORTED)
973 return VINF_SUCCESS;
974
975 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
976 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
977}
978
979
980/**
981 * Destroys the thread switching hook for the specified VCPU.
982 *
983 * @param pVCpu The cross context virtual CPU structure.
984 * @remarks Can be called from any thread.
985 */
986VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
987{
988 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
989 AssertRC(rc);
990 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
991}
992
993
994/**
995 * Disables the thread switching hook for this VCPU (if we got one).
996 *
997 * @param pVCpu The cross context virtual CPU structure.
998 * @thread EMT(pVCpu)
999 *
1000 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
1001 * this call. This means you have to be careful with what you do!
1002 */
1003VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1004{
1005 /*
1006 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1007 * @bugref{7726#c19} explains the need for this trick:
1008 *
1009 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1010 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
1011 * longjmp & normal return to ring-3, which opens a window where we may be
1012 * rescheduled without changing VMCPUID::idHostCpu and cause confusion if
1013 * the CPU starts executing a different EMT. Both functions first disable
1014 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1015 * an opening for getting preempted.
1016 */
1017 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1018 * all the time. */
1019 /** @todo move this into the context hook disabling if(). */
1020 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1021
1022 /*
1023 * Disable the context hook, if we got one.
1024 */
1025 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1026 {
1027 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1028 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1029 AssertRC(rc);
1030 }
1031}
1032
1033
1034/**
1035 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
1036 *
1037 * @returns true if registered, false otherwise.
1038 * @param pVCpu The cross context virtual CPU structure.
1039 */
1040DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1041{
1042 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
1043}
1044
1045
1046/**
1047 * Whether thread-context hooks are registered for this VCPU.
1048 *
1049 * @returns true if registered, false otherwise.
1050 * @param pVCpu The cross context virtual CPU structure.
1051 */
1052VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1053{
1054 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1055}
1056
1057
1058/**
1059 * Returns the ring-0 release logger instance.
1060 *
1061 * @returns Pointer to release logger, NULL if not configured.
1062 * @param pVCpu The cross context virtual CPU structure of the caller.
1063 * @thread EMT(pVCpu)
1064 */
1065VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu)
1066{
1067 PVMMR0LOGGER pLogger = pVCpu->vmm.s.pR0RelLoggerR0;
1068 if (pLogger)
1069 return &pLogger->Logger;
1070 return NULL;
1071}
1072
1073
1074#ifdef VBOX_WITH_STATISTICS
1075/**
1076 * Record return code statistics
1077 * @param pVM The cross context VM structure.
1078 * @param pVCpu The cross context virtual CPU structure.
1079 * @param rc The status code.
1080 */
1081static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1082{
1083 /*
1084 * Collect statistics.
1085 */
1086 switch (rc)
1087 {
1088 case VINF_SUCCESS:
1089 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1090 break;
1091 case VINF_EM_RAW_INTERRUPT:
1092 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1093 break;
1094 case VINF_EM_RAW_INTERRUPT_HYPER:
1095 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1096 break;
1097 case VINF_EM_RAW_GUEST_TRAP:
1098 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1099 break;
1100 case VINF_EM_RAW_RING_SWITCH:
1101 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1102 break;
1103 case VINF_EM_RAW_RING_SWITCH_INT:
1104 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1105 break;
1106 case VINF_EM_RAW_STALE_SELECTOR:
1107 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1108 break;
1109 case VINF_EM_RAW_IRET_TRAP:
1110 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1111 break;
1112 case VINF_IOM_R3_IOPORT_READ:
1113 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1114 break;
1115 case VINF_IOM_R3_IOPORT_WRITE:
1116 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1117 break;
1118 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1119 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1120 break;
1121 case VINF_IOM_R3_MMIO_READ:
1122 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1123 break;
1124 case VINF_IOM_R3_MMIO_WRITE:
1125 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1126 break;
1127 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1128 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1129 break;
1130 case VINF_IOM_R3_MMIO_READ_WRITE:
1131 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1132 break;
1133 case VINF_PATM_HC_MMIO_PATCH_READ:
1134 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1135 break;
1136 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1137 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1138 break;
1139 case VINF_CPUM_R3_MSR_READ:
1140 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1141 break;
1142 case VINF_CPUM_R3_MSR_WRITE:
1143 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1144 break;
1145 case VINF_EM_RAW_EMULATE_INSTR:
1146 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1147 break;
1148 case VINF_PATCH_EMULATE_INSTR:
1149 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1150 break;
1151 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1152 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1153 break;
1154 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1155 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1156 break;
1157 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1158 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1159 break;
1160 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1161 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1162 break;
1163 case VINF_CSAM_PENDING_ACTION:
1164 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1165 break;
1166 case VINF_PGM_SYNC_CR3:
1167 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1168 break;
1169 case VINF_PATM_PATCH_INT3:
1170 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1171 break;
1172 case VINF_PATM_PATCH_TRAP_PF:
1173 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1174 break;
1175 case VINF_PATM_PATCH_TRAP_GP:
1176 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1177 break;
1178 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1179 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1180 break;
1181 case VINF_EM_RESCHEDULE_REM:
1182 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1183 break;
1184 case VINF_EM_RAW_TO_R3:
1185 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1186 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1187 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1188 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1189 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1190 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1191 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1192 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1193 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1194 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1195 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1196 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1197 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1198 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1199 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1200 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1201 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1202 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1203 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1204 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1205 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1206 else
1207 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1208 break;
1209
1210 case VINF_EM_RAW_TIMER_PENDING:
1211 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1212 break;
1213 case VINF_EM_RAW_INTERRUPT_PENDING:
1214 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1215 break;
1216 case VINF_VMM_CALL_HOST:
1217 switch (pVCpu->vmm.s.enmCallRing3Operation)
1218 {
1219 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1220 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1221 break;
1222 case VMMCALLRING3_PDM_LOCK:
1223 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1224 break;
1225 case VMMCALLRING3_PGM_POOL_GROW:
1226 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1227 break;
1228 case VMMCALLRING3_PGM_LOCK:
1229 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1230 break;
1231 case VMMCALLRING3_PGM_MAP_CHUNK:
1232 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1233 break;
1234 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1235 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1236 break;
1237 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1238 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1239 break;
1240 case VMMCALLRING3_VM_SET_ERROR:
1241 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1242 break;
1243 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1244 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1245 break;
1246 case VMMCALLRING3_VM_R0_ASSERTION:
1247 default:
1248 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1249 break;
1250 }
1251 break;
1252 case VINF_PATM_DUPLICATE_FUNCTION:
1253 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1254 break;
1255 case VINF_PGM_CHANGE_MODE:
1256 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1257 break;
1258 case VINF_PGM_POOL_FLUSH_PENDING:
1259 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1260 break;
1261 case VINF_EM_PENDING_REQUEST:
1262 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1263 break;
1264 case VINF_EM_HM_PATCH_TPR_INSTR:
1265 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1266 break;
1267 default:
1268 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1269 break;
1270 }
1271}
1272#endif /* VBOX_WITH_STATISTICS */
1273
1274
1275/**
1276 * The Ring 0 entry point, called by the fast-ioctl path.
1277 *
1278 * @param pGVM The global (ring-0) VM structure.
1279 * @param pVMIgnored The cross context VM structure. The return code is
1280 * stored in pVM->vmm.s.iLastGZRc.
1281 * @param idCpu The Virtual CPU ID of the calling EMT.
1282 * @param enmOperation Which operation to execute.
1283 * @remarks Assume called with interrupts _enabled_.
1284 */
1285VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1286{
1287 RT_NOREF(pVMIgnored);
1288
1289 /*
1290 * Validation.
1291 */
1292 if ( idCpu < pGVM->cCpus
1293 && pGVM->cCpus == pGVM->cCpusUnsafe)
1294 { /*likely*/ }
1295 else
1296 {
1297 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1298 return;
1299 }
1300
1301 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1302 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1303 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1304 && pGVCpu->hNativeThreadR0 == hNativeThread))
1305 { /* likely */ }
1306 else
1307 {
1308 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1309 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1310 return;
1311 }
1312
1313 /*
1314 * SMAP fun.
1315 */
1316 VMM_CHECK_SMAP_SETUP();
1317 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1318
1319 /*
1320 * Perform requested operation.
1321 */
1322 switch (enmOperation)
1323 {
1324 /*
1325 * Run guest code using the available hardware acceleration technology.
1326 */
1327 case VMMR0_DO_HM_RUN:
1328 {
1329 for (;;) /* hlt loop */
1330 {
1331 /*
1332 * Disable preemption.
1333 */
1334 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1335 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1336 RTThreadPreemptDisable(&PreemptState);
1337
1338 /*
1339 * Get the host CPU identifiers, make sure they are valid and that
1340 * we've got a TSC delta for the CPU.
1341 */
1342 RTCPUID idHostCpu;
1343 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1344 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1345 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1346 {
1347 pGVCpu->iHostCpuSet = iHostCpuSet;
1348 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1349
1350 /*
1351 * Update the periodic preemption timer if it's active.
1352 */
1353 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1354 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1355 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1356
1357#ifdef VMM_R0_TOUCH_FPU
1358 /*
1359 * Make sure we've got the FPU state loaded so we don't need to clear
1360 * CR0.TS and get out of sync with the host kernel when loading the guest
1361 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1362 */
1363 CPUMR0TouchHostFpu();
1364#endif
1365 int rc;
1366 bool fPreemptRestored = false;
1367 if (!HMR0SuspendPending())
1368 {
1369 /*
1370 * Enable the context switching hook.
1371 */
1372 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1373 {
1374 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmm.s.hCtxHook));
1375 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1376 }
1377
1378 /*
1379 * Enter HM context.
1380 */
1381 rc = HMR0Enter(pGVCpu);
1382 if (RT_SUCCESS(rc))
1383 {
1384 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1385
1386 /*
1387 * When preemption hooks are in place, enable preemption now that
1388 * we're in HM context.
1389 */
1390 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1391 {
1392 fPreemptRestored = true;
1393 RTThreadPreemptRestore(&PreemptState);
1394 }
1395
1396 /*
1397 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1398 */
1399 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1400 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
1401 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1402
1403 /*
1404 * Assert sanity on the way out. Using manual assertions code here as normal
1405 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1406 */
1407 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1408 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1409 {
1410 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1411 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1412 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1413 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1414 }
1415 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1416 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1417 {
1418 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1419 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1420 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1421 rc = VERR_INVALID_STATE;
1422 }
1423
1424 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1425 }
1426 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1427
1428 /*
1429 * Invalidate the host CPU identifiers before we disable the context
1430 * hook / restore preemption.
1431 */
1432 pGVCpu->iHostCpuSet = UINT32_MAX;
1433 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1434
1435 /*
1436 * Disable context hooks. Due to unresolved cleanup issues, we
1437 * cannot leave the hooks enabled when we return to ring-3.
1438 *
1439 * Note! At the moment HM may also have disabled the hook
1440 * when we get here, but the IPRT API handles that.
1441 */
1442 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1443 {
1444 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1445 RTThreadCtxHookDisable(pGVCpu->vmm.s.hCtxHook);
1446 }
1447 }
1448 /*
1449 * The system is about to go into suspend mode; go back to ring 3.
1450 */
1451 else
1452 {
1453 rc = VINF_EM_RAW_INTERRUPT;
1454 pGVCpu->iHostCpuSet = UINT32_MAX;
1455 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1456 }
1457
1458 /** @todo When HM stops messing with the context hook state, we'll disable
1459 * preemption again before the RTThreadCtxHookDisable call. */
1460 if (!fPreemptRestored)
1461 RTThreadPreemptRestore(&PreemptState);
1462
1463 pGVCpu->vmm.s.iLastGZRc = rc;
1464
1465 /* Fire dtrace probe and collect statistics. */
1466 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1467#ifdef VBOX_WITH_STATISTICS
1468 vmmR0RecordRC(pGVM, pGVCpu, rc);
1469#endif
1470#if 1
1471 /*
1472 * If this is a halt.
1473 */
1474 if (rc != VINF_EM_HALT)
1475 { /* we're not in a hurry for a HLT, so prefer this path */ }
1476 else
1477 {
1478 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1479 if (rc == VINF_SUCCESS)
1480 {
1481 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1482 continue;
1483 }
1484 pGVCpu->vmm.s.cR0HaltsToRing3++;
1485 }
1486#endif
1487 }
1488 /*
1489 * Invalid CPU set index or TSC delta in need of measuring.
1490 */
1491 else
1492 {
1493 pGVCpu->iHostCpuSet = UINT32_MAX;
1494 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1495 RTThreadPreemptRestore(&PreemptState);
1496 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1497 {
1498 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1499 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1500 0 /*default cTries*/);
1501 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1502 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1503 else
1504 pGVCpu->vmm.s.iLastGZRc = rc;
1505 }
1506 else
1507 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1508 }
1509 break;
1510
1511 } /* halt loop. */
1512 break;
1513 }
1514
1515#ifdef VBOX_WITH_NEM_R0
1516# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1517 case VMMR0_DO_NEM_RUN:
1518 {
1519 /*
1520 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1521 */
1522 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1523# ifdef VBOXSTRICTRC_STRICT_ENABLED
1524 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1525# else
1526 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1527# endif
1528 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1529 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1530
1531 pGVCpu->vmm.s.iLastGZRc = rc;
1532
1533 /*
1534 * Fire dtrace probe and collect statistics.
1535 */
1536 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1537# ifdef VBOX_WITH_STATISTICS
1538 vmmR0RecordRC(pGVM, pGVCpu, rc);
1539# endif
1540 break;
1541 }
1542# endif
1543#endif
1544
1545 /*
1546 * For profiling.
1547 */
1548 case VMMR0_DO_NOP:
1549 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1550 break;
1551
1552 /*
1553 * Shouldn't happen.
1554 */
1555 default:
1556 AssertMsgFailed(("%#x\n", enmOperation));
1557 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1558 break;
1559 }
1560 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1561}
1562
1563
1564/**
1565 * Validates a session or VM session argument.
1566 *
1567 * @returns true / false accordingly.
1568 * @param pGVM The global (ring-0) VM structure.
1569 * @param pClaimedSession The session claim to validate.
1570 * @param pSession The session argument.
1571 */
1572DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1573{
1574 /* This must be set! */
1575 if (!pSession)
1576 return false;
1577
1578 /* Only one out of the two. */
1579 if (pGVM && pClaimedSession)
1580 return false;
1581 if (pGVM)
1582 pClaimedSession = pGVM->pSession;
1583 return pClaimedSession == pSession;
1584}
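/* A minimal sketch, not from this file: the two call shapes this helper
   accepts, given a non-NULL pSession from the support driver (variable
   names are hypothetical). */
#if 0 /* illustrative only */
    bool fOk1 = vmmR0IsValidSession(pGVM, NULL, pSession);            /* VM request: true iff pGVM->pSession == pSession */
    bool fOk2 = vmmR0IsValidSession(NULL, pClaimedSession, pSession); /* non-VM request: true iff pClaimedSession == pSession */
#endif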
1585
1586
1587/**
1588 * VMMR0EntryEx worker function, either called directly or whenever possible
1589 * called thru a longjmp so we can exit safely on failure.
1590 *
1591 * @returns VBox status code.
1592 * @param pGVM The global (ring-0) VM structure.
1593 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1594 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1595 * @param enmOperation Which operation to execute.
1596 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1597 * The support driver validates this if it's present.
1598 * @param u64Arg Some simple constant argument.
1599 * @param pSession The session of the caller.
1600 *
1601 * @remarks Assume called with interrupts _enabled_.
1602 */
1603static int vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1604 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1605{
1606 /*
1607 * Validate pGVM and idCpu for consistency and validity.
1608 */
1609 if (pGVM != NULL)
1610 {
1611 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
1612 { /* likely */ }
1613 else
1614 {
1615 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1616 return VERR_INVALID_POINTER;
1617 }
1618
1619 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1620 { /* likely */ }
1621 else
1622 {
1623 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1624 return VERR_INVALID_PARAMETER;
1625 }
1626
1627 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1628 && pGVM->enmVMState <= VMSTATE_TERMINATED
1629 && pGVM->pSession == pSession
1630 && pGVM->pSelf == pGVM))
1631 { /* likely */ }
1632 else
1633 {
1634 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1635 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1636 return VERR_INVALID_POINTER;
1637 }
1638 }
1639 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1640 { /* likely */ }
1641 else
1642 {
1643 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1644 return VERR_INVALID_PARAMETER;
1645 }
1646
1647 /*
1648 * SMAP fun.
1649 */
1650 VMM_CHECK_SMAP_SETUP();
1651 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1652
1653 /*
1654 * Process the request.
1655 */
1656 int rc;
1657 switch (enmOperation)
1658 {
1659 /*
1660 * GVM requests
1661 */
1662 case VMMR0_DO_GVMM_CREATE_VM:
1663 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1664 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1665 else
1666 rc = VERR_INVALID_PARAMETER;
1667 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1668 break;
1669
1670 case VMMR0_DO_GVMM_DESTROY_VM:
1671 if (pReqHdr == NULL && u64Arg == 0)
1672 rc = GVMMR0DestroyVM(pGVM);
1673 else
1674 rc = VERR_INVALID_PARAMETER;
1675 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1676 break;
1677
1678 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1679 if (pGVM != NULL)
1680 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1681 else
1682 rc = VERR_INVALID_PARAMETER;
1683 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1684 break;
1685
1686 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1687 if (pGVM != NULL)
1688 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1689 else
1690 rc = VERR_INVALID_PARAMETER;
1691 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1692 break;
1693
1694 case VMMR0_DO_GVMM_SCHED_HALT:
1695 if (pReqHdr)
1696 return VERR_INVALID_PARAMETER;
1697 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1698 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1699 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1700 break;
1701
1702 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1703 if (pReqHdr || u64Arg)
1704 return VERR_INVALID_PARAMETER;
1705 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1706 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1707 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1708 break;
1709
1710 case VMMR0_DO_GVMM_SCHED_POKE:
1711 if (pReqHdr || u64Arg)
1712 return VERR_INVALID_PARAMETER;
1713 rc = GVMMR0SchedPoke(pGVM, idCpu);
1714 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1715 break;
1716
1717 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1718 if (u64Arg)
1719 return VERR_INVALID_PARAMETER;
1720 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1721 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1722 break;
1723
1724 case VMMR0_DO_GVMM_SCHED_POLL:
1725 if (pReqHdr || u64Arg > 1)
1726 return VERR_INVALID_PARAMETER;
1727 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1728 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1729 break;
1730
1731 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1732 if (u64Arg)
1733 return VERR_INVALID_PARAMETER;
1734 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1735 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1736 break;
1737
1738 case VMMR0_DO_GVMM_RESET_STATISTICS:
1739 if (u64Arg)
1740 return VERR_INVALID_PARAMETER;
1741 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1742 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1743 break;
1744
1745 /*
1746 * Initialize the R0 part of a VM instance.
1747 */
1748 case VMMR0_DO_VMMR0_INIT:
1749 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1750 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1751 break;
1752
1753 /*
1754 * Does EMT specific ring-0 init.
1755 */
1756 case VMMR0_DO_VMMR0_INIT_EMT:
1757 rc = vmmR0InitVMEmt(pGVM, idCpu);
1758 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1759 break;
1760
1761 /*
1762 * Terminate the R0 part of a VM instance.
1763 */
1764 case VMMR0_DO_VMMR0_TERM:
1765 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1766 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1767 break;
1768
1769 /*
1770 * Attempt to enable hm mode and check the current setting.
1771 */
1772 case VMMR0_DO_HM_ENABLE:
1773 rc = HMR0EnableAllCpus(pGVM);
1774 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1775 break;
1776
1777 /*
1778 * Setup the hardware accelerated session.
1779 */
1780 case VMMR0_DO_HM_SETUP_VM:
1781 rc = HMR0SetupVM(pGVM);
1782 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1783 break;
1784
1785 /*
1786 * PGM wrappers.
1787 */
1788 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1789 if (idCpu == NIL_VMCPUID)
1790 return VERR_INVALID_CPU_ID;
1791 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
1792 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1793 break;
1794
1795 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1796 if (idCpu == NIL_VMCPUID)
1797 return VERR_INVALID_CPU_ID;
1798 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
1799 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1800 break;
1801
1802 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1803 if (idCpu == NIL_VMCPUID)
1804 return VERR_INVALID_CPU_ID;
1805 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, idCpu);
1806 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1807 break;
1808
1809 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1810 if (idCpu != 0)
1811 return VERR_INVALID_CPU_ID;
1812 rc = PGMR0PhysSetupIoMmu(pGVM);
1813 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1814 break;
1815
1816 case VMMR0_DO_PGM_POOL_GROW:
1817 if (idCpu == NIL_VMCPUID)
1818 return VERR_INVALID_CPU_ID;
1819 rc = PGMR0PoolGrow(pGVM);
1820 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1821 break;
1822
1823 /*
1824 * GMM wrappers.
1825 */
1826 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1827 if (u64Arg)
1828 return VERR_INVALID_PARAMETER;
1829 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1830 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1831 break;
1832
1833 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1834 if (u64Arg)
1835 return VERR_INVALID_PARAMETER;
1836 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1837 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1838 break;
1839
1840 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1841 if (u64Arg)
1842 return VERR_INVALID_PARAMETER;
1843 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1844 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1845 break;
1846
1847 case VMMR0_DO_GMM_FREE_PAGES:
1848 if (u64Arg)
1849 return VERR_INVALID_PARAMETER;
1850 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1851 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1852 break;
1853
1854 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1855 if (u64Arg)
1856 return VERR_INVALID_PARAMETER;
1857 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1858 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1859 break;
1860
1861 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1862 if (u64Arg)
1863 return VERR_INVALID_PARAMETER;
1864 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1865 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1866 break;
1867
1868 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1869 if (idCpu == NIL_VMCPUID)
1870 return VERR_INVALID_CPU_ID;
1871 if (u64Arg)
1872 return VERR_INVALID_PARAMETER;
1873 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1874 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1875 break;
1876
1877 case VMMR0_DO_GMM_BALLOONED_PAGES:
1878 if (u64Arg)
1879 return VERR_INVALID_PARAMETER;
1880 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1881 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1882 break;
1883
1884 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1885 if (u64Arg)
1886 return VERR_INVALID_PARAMETER;
1887 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1888 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1889 break;
1890
1891 case VMMR0_DO_GMM_SEED_CHUNK:
1892 if (pReqHdr)
1893 return VERR_INVALID_PARAMETER;
1894 rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
1895 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1896 break;
1897
1898 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1899 if (idCpu == NIL_VMCPUID)
1900 return VERR_INVALID_CPU_ID;
1901 if (u64Arg)
1902 return VERR_INVALID_PARAMETER;
1903 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1904 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1905 break;
1906
1907 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1908 if (idCpu == NIL_VMCPUID)
1909 return VERR_INVALID_CPU_ID;
1910 if (u64Arg)
1911 return VERR_INVALID_PARAMETER;
1912 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1913 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1914 break;
1915
1916 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1917 if (idCpu == NIL_VMCPUID)
1918 return VERR_INVALID_CPU_ID;
1919 if ( u64Arg
1920 || pReqHdr)
1921 return VERR_INVALID_PARAMETER;
1922 rc = GMMR0ResetSharedModules(pGVM, idCpu);
1923 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1924 break;
1925
1926#ifdef VBOX_WITH_PAGE_SHARING
1927 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1928 {
1929 if (idCpu == NIL_VMCPUID)
1930 return VERR_INVALID_CPU_ID;
1931 if ( u64Arg
1932 || pReqHdr)
1933 return VERR_INVALID_PARAMETER;
1934 rc = GMMR0CheckSharedModules(pGVM, idCpu);
1935 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1936 break;
1937 }
1938#endif
1939
1940#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1941 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1942 if (u64Arg)
1943 return VERR_INVALID_PARAMETER;
1944 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1945 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1946 break;
1947#endif
1948
1949 case VMMR0_DO_GMM_QUERY_STATISTICS:
1950 if (u64Arg)
1951 return VERR_INVALID_PARAMETER;
1952 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1953 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1954 break;
1955
1956 case VMMR0_DO_GMM_RESET_STATISTICS:
1957 if (u64Arg)
1958 return VERR_INVALID_PARAMETER;
1959 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1960 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1961 break;
1962
1963 /*
1964 * A quick GCFGM mock-up.
1965 */
1966 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
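        /* Note: the value names accepted here are the GVMM scheduler tunables
           handled by GVMMR0SetConfig() / GVMMR0QueryConfig() in GVMMR0.cpp. */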
1967 case VMMR0_DO_GCFGM_SET_VALUE:
1968 case VMMR0_DO_GCFGM_QUERY_VALUE:
1969 {
1970 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1971 return VERR_INVALID_PARAMETER;
1972 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1973 if (pReq->Hdr.cbReq != sizeof(*pReq))
1974 return VERR_INVALID_PARAMETER;
1975 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1976 {
1977 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1978 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1979 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1980 }
1981 else
1982 {
1983 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1984 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1985 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1986 }
1987 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1988 break;
1989 }
1990
1991 /*
1992 * PDM Wrappers.
1993 */
1994 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1995 {
1996 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1997 return VERR_INVALID_PARAMETER;
1998 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1999 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2000 break;
2001 }
2002
2003 case VMMR0_DO_PDM_DEVICE_CREATE:
2004 {
2005 if (!pReqHdr || u64Arg || idCpu != 0)
2006 return VERR_INVALID_PARAMETER;
2007 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
2008 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2009 break;
2010 }
2011
2012 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2013 {
2014 if (!pReqHdr || u64Arg)
2015 return VERR_INVALID_PARAMETER;
2016 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2017 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2018 break;
2019 }
2020
 2021 /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2022 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2023 {
2024 if (!pReqHdr || u64Arg || idCpu != 0)
2025 return VERR_INVALID_PARAMETER;
2026 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2027 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2028 break;
2029 }
2030
2031 /*
2032 * Requests to the internal networking service.
2033 */
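        /* Note: each of these requests embeds the caller's session, which
           vmmR0IsValidSession() checks against pSession before dispatching. */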
2034 case VMMR0_DO_INTNET_OPEN:
2035 {
2036 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2037 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2038 return VERR_INVALID_PARAMETER;
2039 rc = IntNetR0OpenReq(pSession, pReq);
2040 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2041 break;
2042 }
2043
2044 case VMMR0_DO_INTNET_IF_CLOSE:
2045 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2046 return VERR_INVALID_PARAMETER;
2047 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2048 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2049 break;
2050
2051
2052 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2053 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2054 return VERR_INVALID_PARAMETER;
2055 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2056 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2057 break;
2058
2059 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2060 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2061 return VERR_INVALID_PARAMETER;
2062 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2063 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2064 break;
2065
2066 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2067 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2068 return VERR_INVALID_PARAMETER;
2069 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2070 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2071 break;
2072
2073 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2074 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2075 return VERR_INVALID_PARAMETER;
2076 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2077 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2078 break;
2079
2080 case VMMR0_DO_INTNET_IF_SEND:
2081 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2082 return VERR_INVALID_PARAMETER;
2083 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2084 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2085 break;
2086
2087 case VMMR0_DO_INTNET_IF_WAIT:
2088 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2089 return VERR_INVALID_PARAMETER;
2090 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2091 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2092 break;
2093
2094 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2095 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2096 return VERR_INVALID_PARAMETER;
2097 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2098 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2099 break;
2100
2101#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2102 /*
2103 * Requests to host PCI driver service.
2104 */
2105 case VMMR0_DO_PCIRAW_REQ:
2106 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2107 return VERR_INVALID_PARAMETER;
2108 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2109 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2110 break;
2111#endif
2112
2113 /*
2114 * NEM requests.
2115 */
2116#ifdef VBOX_WITH_NEM_R0
2117# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2118 case VMMR0_DO_NEM_INIT_VM:
2119 if (u64Arg || pReqHdr || idCpu != 0)
2120 return VERR_INVALID_PARAMETER;
2121 rc = NEMR0InitVM(pGVM);
2122 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2123 break;
2124
2125 case VMMR0_DO_NEM_INIT_VM_PART_2:
2126 if (u64Arg || pReqHdr || idCpu != 0)
2127 return VERR_INVALID_PARAMETER;
2128 rc = NEMR0InitVMPart2(pGVM);
2129 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2130 break;
2131
2132 case VMMR0_DO_NEM_MAP_PAGES:
2133 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2134 return VERR_INVALID_PARAMETER;
2135 rc = NEMR0MapPages(pGVM, idCpu);
2136 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2137 break;
2138
2139 case VMMR0_DO_NEM_UNMAP_PAGES:
2140 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2141 return VERR_INVALID_PARAMETER;
2142 rc = NEMR0UnmapPages(pGVM, idCpu);
2143 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2144 break;
2145
2146 case VMMR0_DO_NEM_EXPORT_STATE:
2147 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2148 return VERR_INVALID_PARAMETER;
2149 rc = NEMR0ExportState(pGVM, idCpu);
2150 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2151 break;
2152
2153 case VMMR0_DO_NEM_IMPORT_STATE:
2154 if (pReqHdr || idCpu == NIL_VMCPUID)
2155 return VERR_INVALID_PARAMETER;
2156 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2157 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2158 break;
2159
2160 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2161 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2162 return VERR_INVALID_PARAMETER;
2163 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2164 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2165 break;
2166
2167 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2168 if (pReqHdr || idCpu == NIL_VMCPUID)
2169 return VERR_INVALID_PARAMETER;
2170 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2171 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2172 break;
2173
2174 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2175 if (u64Arg || pReqHdr)
2176 return VERR_INVALID_PARAMETER;
2177 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2178 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2179 break;
2180
2181# if 1 && defined(DEBUG_bird)
2182 case VMMR0_DO_NEM_EXPERIMENT:
2183 if (pReqHdr)
2184 return VERR_INVALID_PARAMETER;
2185 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2186 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2187 break;
2188# endif
2189# endif
2190#endif
2191
2192 /*
2193 * IOM requests.
2194 */
2195 case VMMR0_DO_IOM_GROW_IO_PORTS:
2196 {
2197 if (pReqHdr || idCpu != 0)
2198 return VERR_INVALID_PARAMETER;
2199 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2200 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2201 break;
2202 }
2203
2204 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2205 {
2206 if (pReqHdr || idCpu != 0)
2207 return VERR_INVALID_PARAMETER;
2208 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2209 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2210 break;
2211 }
2212
2213 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2214 {
2215 if (pReqHdr || idCpu != 0)
2216 return VERR_INVALID_PARAMETER;
2217 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2218 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2219 break;
2220 }
2221
2222 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2223 {
2224 if (pReqHdr || idCpu != 0)
2225 return VERR_INVALID_PARAMETER;
2226 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2227 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2228 break;
2229 }
2230
2231 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2232 {
2233 if (pReqHdr || idCpu != 0)
2234 return VERR_INVALID_PARAMETER;
2235 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2236 if (RT_SUCCESS(rc))
2237 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2238 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2239 break;
2240 }
2241
2242 /*
2243 * DBGF requests.
2244 */
2245#ifdef VBOX_WITH_DBGF_TRACING
2246 case VMMR0_DO_DBGF_TRACER_CREATE:
2247 {
2248 if (!pReqHdr || u64Arg || idCpu != 0)
2249 return VERR_INVALID_PARAMETER;
2250 rc = DBGFR0TracerCreateReqHandler(pGVM, (PDBGFTRACERCREATEREQ)pReqHdr);
2251 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2252 break;
2253 }
2254
2255 case VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER:
2256 {
2257 if (!pReqHdr || u64Arg)
2258 return VERR_INVALID_PARAMETER;
2259# if 0 /** @todo */
2260 rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
2261# else
2262 rc = VERR_NOT_IMPLEMENTED;
2263# endif
2264 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2265 break;
2266 }
2267#endif
2268
2269 case VMMR0_DO_DBGF_BP_INIT:
2270 {
2271 if (!pReqHdr || u64Arg || idCpu != 0)
2272 return VERR_INVALID_PARAMETER;
2273 rc = DBGFR0BpInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2274 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2275 break;
2276 }
2277
2278 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2279 {
2280 if (!pReqHdr || u64Arg || idCpu != 0)
2281 return VERR_INVALID_PARAMETER;
2282 rc = DBGFR0BpChunkAllocReqHandler(pGVM, (PDBGFBPCHUNKALLOCREQ)pReqHdr);
2283 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2284 break;
2285 }
2286
2287 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2288 {
2289 if (!pReqHdr || u64Arg || idCpu != 0)
2290 return VERR_INVALID_PARAMETER;
2291 rc = DBGFR0BpL2TblChunkAllocReqHandler(pGVM, (PDBGFBPL2TBLCHUNKALLOCREQ)pReqHdr);
2292 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2293 break;
2294 }
2295
2296 case VMMR0_DO_DBGF_BP_OWNER_INIT:
2297 {
2298 if (!pReqHdr || u64Arg || idCpu != 0)
2299 return VERR_INVALID_PARAMETER;
2300 rc = DBGFR0BpOwnerInitReqHandler(pGVM, (PDBGFBPOWNERINITREQ)pReqHdr);
2301 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2302 break;
2303 }
2304
2305 /*
2306 * For profiling.
2307 */
2308 case VMMR0_DO_NOP:
2309 case VMMR0_DO_SLOW_NOP:
2310 return VINF_SUCCESS;
2311
2312 /*
2313 * For testing Ring-0 APIs invoked in this environment.
2314 */
2315 case VMMR0_DO_TESTS:
2316 /** @todo make new test */
2317 return VINF_SUCCESS;
2318
2319 default:
2320 /*
 2321 * We're returning VERR_NOT_SUPPORTED here so we've got something other
 2322 * than -1, which the interrupt gate glue code might return.
2323 */
2324 Log(("operation %#x is not supported\n", enmOperation));
2325 return VERR_NOT_SUPPORTED;
2326 }
2327 return rc;
2328}
2329
2330
2331/**
2332 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2333 */
2334typedef struct VMMR0ENTRYEXARGS
2335{
2336 PGVM pGVM;
2337 VMCPUID idCpu;
2338 VMMR0OPERATION enmOperation;
2339 PSUPVMMR0REQHDR pReq;
2340 uint64_t u64Arg;
2341 PSUPDRVSESSION pSession;
2342} VMMR0ENTRYEXARGS;
2343/** Pointer to a vmmR0EntryExWrapper argument package. */
2344typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
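
/* The setjmp helper (vmmR0CallRing3SetJmpEx) passes a single user pointer to
   its callback, so the worker arguments are marshalled through this package
   and unpacked again in vmmR0EntryExWrapper() below. */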
2345
2346/**
2347 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2348 *
2349 * @returns VBox status code.
2350 * @param pvArgs The argument package
2351 */
2352static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2353{
2354 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2355 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2356 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2357 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2358 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2359 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2360}
2361
2362
2363/**
2364 * The Ring 0 entry point, called by the support library (SUP).
2365 *
2366 * @returns VBox status code.
2367 * @param pGVM The global (ring-0) VM structure.
2368 * @param pVM The cross context VM structure.
2369 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2370 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
2371 * @param enmOperation Which operation to execute.
2372 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2373 * @param u64Arg Some simple constant argument.
2374 * @param pSession The session of the caller.
 2375 * @remarks Assumed to be called with interrupts _enabled_.
2376 */
2377VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2378 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2379{
2380 /*
2381 * Requests that should only happen on the EMT thread will be
2382 * wrapped in a setjmp so we can assert without causing trouble.
2383 */
2384 if ( pVM != NULL
2385 && pGVM != NULL
2386 && pVM == pGVM /** @todo drop pGVM */
2387 && idCpu < pGVM->cCpus
2388 && pGVM->pSession == pSession
2389 && pGVM->pSelf == pVM)
2390 {
2391 switch (enmOperation)
2392 {
2393 /* These might/will be called before VMMR3Init. */
2394 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2395 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2396 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2397 case VMMR0_DO_GMM_FREE_PAGES:
2398 case VMMR0_DO_GMM_BALLOONED_PAGES:
 2399 /* On the Mac we might not have a valid jmp buf, so check these as well. */
2400 case VMMR0_DO_VMMR0_INIT:
2401 case VMMR0_DO_VMMR0_TERM:
2402
2403 case VMMR0_DO_PDM_DEVICE_CREATE:
2404 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2405 case VMMR0_DO_IOM_GROW_IO_PORTS:
2406 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2407 case VMMR0_DO_DBGF_BP_INIT:
2408 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2409 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2410 {
2411 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2412 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2413 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2414 && pGVCpu->hNativeThreadR0 == hNativeThread))
2415 {
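                    /* No jump buffer stack yet? These requests can arrive before
                       ring-0 init has set it up, so break out of the switch and
                       take the direct worker call at the bottom. */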
2416 if (!pGVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2417 break;
2418
2419 /** @todo validate this EMT claim... GVM knows. */
2420 VMMR0ENTRYEXARGS Args;
2421 Args.pGVM = pGVM;
2422 Args.idCpu = idCpu;
2423 Args.enmOperation = enmOperation;
2424 Args.pReq = pReq;
2425 Args.u64Arg = u64Arg;
2426 Args.pSession = pSession;
2427 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2428 }
2429 return VERR_VM_THREAD_NOT_EMT;
2430 }
2431
2432 default:
2433 case VMMR0_DO_PGM_POOL_GROW:
2434 break;
2435 }
2436 }
2437 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2438}
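
/*
 * Usage sketch (illustrative, with the usual SUPVMMR0REQHDR conventions
 * assumed): ring-3 reaches this entry point through SUPLib, e.g. for the
 * GVMM statistics request dispatched above:
 *
 *     GVMMQUERYSTATISTICSSREQ Req;
 *     Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *     Req.Hdr.cbReq    = sizeof(Req);
 *     Req.pSession     = pVM->pSession;
 *     int rc = SUPR3CallVMMR0Ex(pVMR0, NIL_VMCPUID,
 *                               VMMR0_DO_GVMM_QUERY_STATISTICS, 0, &Req.Hdr);
 *
 * where pVMR0 is the ring-0 VM handle. The support driver validates the
 * session and hands the call to vmmR0EntryExWorker() via the code above.
 */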
2439
2440
2441/**
2442 * Checks whether we've armed the ring-0 long jump machinery.
2443 *
2444 * @returns @c true / @c false
2445 * @param pVCpu The cross context virtual CPU structure.
2446 * @thread EMT
2447 * @sa VMMIsLongJumpArmed
2448 */
2449VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2450{
2451#ifdef RT_ARCH_X86
2452 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2453 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2454#else
2455 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2456 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2457#endif
2458}
2459
2460
2461/**
2462 * Checks whether we've done a ring-3 long jump.
2463 *
2464 * @returns @c true / @c false
2465 * @param pVCpu The cross context virtual CPU structure.
2466 * @thread EMT
2467 */
2468VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2469{
2470 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2471}
2472
2473
2474/**
2475 * Internal R0 logger worker: Flush logger.
2476 *
2477 * @param pLogger The logger instance to flush.
2478 * @remark This function must be exported!
2479 */
2480VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2481{
2482#ifdef LOG_ENABLED
2483 /*
2484 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2485 * (This is a bit paranoid code.)
2486 */
2487 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_UOFFSETOF(VMMR0LOGGER, Logger));
2488 if ( !VALID_PTR(pR0Logger)
2489 || !VALID_PTR(pR0Logger + 1)
2490 || pLogger->u32Magic != RTLOGGER_MAGIC)
2491 {
2492# ifdef DEBUG
2493 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2494# endif
2495 return;
2496 }
2497 if (pR0Logger->fFlushingDisabled)
2498 return; /* quietly */
2499
2500 PVMCC pVM = pR0Logger->pVM;
2501 if ( !VALID_PTR(pVM)
2502 || pVM->pSelf != pVM)
2503 {
2504# ifdef DEBUG
2505 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pSelf=%p! pLogger=%p\n", pVM, pVM->pSelf, pLogger);
2506# endif
2507 return;
2508 }
2509
2510 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2511 if (pVCpu)
2512 {
2513 /*
2514 * Check that the jump buffer is armed.
2515 */
2516# ifdef RT_ARCH_X86
2517 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2518 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2519# else
2520 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2521 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2522# endif
2523 {
2524# ifdef DEBUG
2525 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2526# endif
2527 return;
2528 }
2529 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2530 }
2531# ifdef DEBUG
2532 else
2533 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2534# endif
2535#else
2536 NOREF(pLogger);
2537#endif /* LOG_ENABLED */
2538}
2539
2540#ifdef LOG_ENABLED
2541
2542/**
2543 * Disables flushing of the ring-0 debug log.
2544 *
2545 * @param pVCpu The cross context virtual CPU structure.
2546 */
2547VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPUCC pVCpu)
2548{
2549 if (pVCpu->vmm.s.pR0LoggerR0)
2550 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2551 if (pVCpu->vmm.s.pR0RelLoggerR0)
2552 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = true;
2553}
2554
2555
2556/**
2557 * Enables flushing of the ring-0 debug log.
2558 *
2559 * @param pVCpu The cross context virtual CPU structure.
2560 */
2561VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPUCC pVCpu)
2562{
2563 if (pVCpu->vmm.s.pR0LoggerR0)
2564 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2565 if (pVCpu->vmm.s.pR0RelLoggerR0)
2566 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = false;
2567}
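
/*
 * Usage sketch (illustrative): bracket code that may log but must not long
 * jump to ring-3 with the disable/enable pair:
 *
 *     VMMR0LogFlushDisable(pVCpu);
 *     ... section where a flush-triggered ring-3 call would be unsafe ...
 *     VMMR0LogFlushEnable(pVCpu);
 */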
2568
2569
2570/**
 2571 * Checks whether log flushing is disabled.
 2572 *
 * @returns true if flushing is disabled (the default when no ring-0 logger
 *          is registered), false otherwise.
 2573 * @param pVCpu The cross context virtual CPU structure.
2574 */
2575VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPUCC pVCpu)
2576{
2577 if (pVCpu->vmm.s.pR0LoggerR0)
2578 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2579 if (pVCpu->vmm.s.pR0RelLoggerR0)
2580 return pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled;
2581 return true;
2582}
2583
2584#endif /* LOG_ENABLED */
2585
2586/*
2587 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
2588 */
2589DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
2590{
2591 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
2592 if (pGVCpu)
2593 {
2594 PVMCPUCC pVCpu = pGVCpu;
2595 if (RT_VALID_PTR(pVCpu))
2596 {
2597 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR0;
2598 if (RT_VALID_PTR(pVmmLogger))
2599 {
2600 if ( pVmmLogger->fCreated
2601 && pVmmLogger->pVM == pGVCpu->pGVM)
2602 {
2603 if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
2604 return NULL;
2605 uint16_t const fFlags = RT_LO_U16(fFlagsAndGroup);
2606 uint16_t const iGroup = RT_HI_U16(fFlagsAndGroup);
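                    /* Log only when the requested group has all the requested
                       flags plus RTLOGGRPFLAGS_ENABLED set; out-of-range group
                       numbers fall back to group 0, and UINT16_MAX skips the
                       group check altogether. */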
2607 if ( iGroup != UINT16_MAX
2608 && ( ( pVmmLogger->Logger.afGroups[iGroup < pVmmLogger->Logger.cGroups ? iGroup : 0]
2609 & (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED))
2610 != (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED)))
2611 return NULL;
2612 return &pVmmLogger->Logger;
2613 }
2614 }
2615 }
2616 }
2617 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
2618}
2619
2620
2621/*
2622 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2623 *
2624 * @returns true if the breakpoint should be hit, false if it should be ignored.
2625 */
2626DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2627{
2628#if 0
2629 return true;
2630#else
2631 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2632 if (pVM)
2633 {
2634 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2635
2636 if (pVCpu)
2637 {
2638# ifdef RT_ARCH_X86
2639 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2640 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2641# else
2642 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2643 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2644# endif
2645 {
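                /* Long jump to ring-3 to report the assertion; if it is handled
                   there (success status), suppress the ring-0 breakpoint. */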
2646 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2647 return RT_FAILURE_NP(rc);
2648 }
2649 }
2650 }
2651# ifdef RT_OS_LINUX
2652 return true;
2653# else
2654 return false;
2655# endif
2656#endif
2657}
2658
2659
2660/*
2661 * Override this so we can push it up to ring-3.
2662 */
2663DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2664{
2665 /*
2666 * To the log.
2667 */
2668 LogAlways(("\n!!R0-Assertion Failed!!\n"
2669 "Expression: %s\n"
2670 "Location : %s(%d) %s\n",
2671 pszExpr, pszFile, uLine, pszFunction));
2672
2673 /*
2674 * To the global VMM buffer.
2675 */
2676 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2677 if (pVM)
2678 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2679 "\n!!R0-Assertion Failed!!\n"
2680 "Expression: %.*s\n"
2681 "Location : %s(%d) %s\n",
2682 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2683 pszFile, uLine, pszFunction);
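    /* (The %.*s width clips the expression to 3/4 of the buffer, presumably to
       keep room for the location line.) */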
2684
2685 /*
2686 * Continue the normal way.
2687 */
2688 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2689}
2690
2691
2692/**
2693 * Callback for RTLogFormatV which writes to the ring-3 log port.
2694 * See PFNLOGOUTPUT() for details.
2695 */
2696static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2697{
2698 for (size_t i = 0; i < cbChars; i++)
2699 {
2700 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2701 }
2702
2703 NOREF(pv);
2704 return cbChars;
2705}
2706
2707
2708/*
2709 * Override this so we can push it up to ring-3.
2710 */
2711DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2712{
2713 va_list vaCopy;
2714
2715 /*
2716 * Push the message to the loggers.
2717 */
2718 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2719 if (pLog)
2720 {
2721 va_copy(vaCopy, va);
2722 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2723 va_end(vaCopy);
2724 }
2725 pLog = RTLogRelGetDefaultInstance();
2726 if (pLog)
2727 {
2728 va_copy(vaCopy, va);
2729 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2730 va_end(vaCopy);
2731 }
2732
2733 /*
2734 * Push it to the global VMM buffer.
2735 */
2736 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2737 if (pVM)
2738 {
2739 va_copy(vaCopy, va);
2740 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2741 va_end(vaCopy);
2742 }
2743
2744 /*
2745 * Continue the normal way.
2746 */
2747 RTAssertMsg2V(pszFormat, va);
2748}
2749