VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@82567

Last change on this file since 82567 was 82555, checked in by vboxsync, 5 years ago

PGMPool,MM: Use ring-0 mapping while in ring-0, so let the page pool do its own allocations rather than going through MMPage*. The MMPage* code is mostly dead code, but we still need it for a dummy page allocation. I'll address this tomorrow. bugref:9528

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 96.3 KB
Line 
1/* $Id: VMMR0.cpp 82555 2019-12-11 23:56:54Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_NEM_R0
31# include <VBox/vmm/nem.h>
32#endif
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/stam.h>
35#include <VBox/vmm/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvm.h>
39#ifdef VBOX_WITH_PCI_PASSTHROUGH
40# include <VBox/vmm/pdmpci.h>
41#endif
42#include <VBox/vmm/apic.h>
43
44#include <VBox/vmm/gvmm.h>
45#include <VBox/vmm/gmm.h>
46#include <VBox/vmm/gim.h>
47#include <VBox/intnet.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/param.h>
50#include <VBox/err.h>
51#include <VBox/version.h>
52#include <VBox/log.h>
53
54#include <iprt/asm-amd64-x86.h>
55#include <iprt/assert.h>
56#include <iprt/crc.h>
57#include <iprt/mp.h>
58#include <iprt/once.h>
59#include <iprt/stdarg.h>
60#include <iprt/string.h>
61#include <iprt/thread.h>
62#include <iprt/timer.h>
63#include <iprt/time.h>
64
65#include "dtrace/VBoxVMM.h"
66
67
68#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
69# pragma intrinsic(_AddressOfReturnAddress)
70#endif
71
72#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
73# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
74#endif
75
76
77
78/*********************************************************************************************************************************
79* Defined Constants And Macros *
80*********************************************************************************************************************************/
81/** @def VMM_CHECK_SMAP_SETUP
82 * SMAP check setup. */
83/** @def VMM_CHECK_SMAP_CHECK
84 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
85 * it will be logged and @a a_BadExpr is executed. */
86/** @def VMM_CHECK_SMAP_CHECK2
87 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
88 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
89 * executed. */
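/* Note: the "|| 1" in the #if below keeps these checks enabled in all build
   types, not just VBOX_STRICT ones. */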
90#if defined(VBOX_STRICT) || 1
91# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
92# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
93 do { \
94 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
95 { \
96 RTCCUINTREG fEflCheck = ASMGetFlags(); \
97 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
98 { /* likely */ } \
99 else \
100 { \
101 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
102 a_BadExpr; \
103 } \
104 } \
105 } while (0)
106# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
107 do { \
108 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
109 { \
110 RTCCUINTREG fEflCheck = ASMGetFlags(); \
111 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
112 { /* likely */ } \
113 else if (a_pGVM) \
114 { \
115 SUPR0BadContext((a_pGVM)->pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
116 RTStrPrintf((a_pGVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pGVM)->vmm.s.szRing0AssertMsg1), \
117 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
118 a_BadExpr; \
119 } \
120 else \
121 { \
122 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
123 a_BadExpr; \
124 } \
125 } \
126 } while (0)
127#else
128# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
129# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
130# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures)
131#endif
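/* A rough usage sketch (see ModuleInit and vmmR0InitVM below): capture the
   kernel feature mask once per function, then re-check EFLAGS.AC after calls
   that may have clobbered it, e.g.:
 *
 *     VMM_CHECK_SMAP_SETUP();
 *     VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
 *     ...
 *     rc = SomeR0Call(...);                // illustrative placeholder call
 *     VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);
 */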
132
133
134/*********************************************************************************************************************************
135* Internal Functions *
136*********************************************************************************************************************************/
137RT_C_DECLS_BEGIN
138#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
139extern uint64_t __udivdi3(uint64_t, uint64_t);
140extern uint64_t __umoddi3(uint64_t, uint64_t);
141#endif
142RT_C_DECLS_END
143
144
145/*********************************************************************************************************************************
146* Global Variables *
147*********************************************************************************************************************************/
148/** Drag in necessary library bits.
149 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
150PFNRT g_VMMR0Deps[] =
151{
152 (PFNRT)RTCrc32,
153 (PFNRT)RTOnce,
154#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
155 (PFNRT)__udivdi3,
156 (PFNRT)__umoddi3,
157#endif
158 NULL
159};
160
161#ifdef RT_OS_SOLARIS
162/* Dependency information for the native Solaris loader. */
163extern "C" { char _depends_on[] = "vboxdrv"; }
164#endif
165
166
167/**
168 * Initialize the module.
169 * This is called when we're first loaded.
170 *
171 * @returns 0 on success.
172 * @returns VBox status on failure.
173 * @param hMod Image handle for use in APIs.
174 */
175DECLEXPORT(int) ModuleInit(void *hMod)
176{
177 VMM_CHECK_SMAP_SETUP();
178 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
179
180#ifdef VBOX_WITH_DTRACE_R0
181 /*
182 * The first thing to do is register the static tracepoints.
183 * (Deregistration is automatic.)
184 */
185 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
186 if (RT_FAILURE(rc2))
187 return rc2;
188#endif
189 LogFlow(("ModuleInit:\n"));
190
191#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
192 /*
193 * Display the CMOS debug code.
194 */
195 ASMOutU8(0x72, 0x03);
196 uint8_t bDebugCode = ASMInU8(0x73);
197 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
198 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
199#endif
200
201 /*
202 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
203 */
204 int rc = vmmInitFormatTypes();
205 if (RT_SUCCESS(rc))
206 {
207 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
208 rc = GVMMR0Init();
209 if (RT_SUCCESS(rc))
210 {
211 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
212 rc = GMMR0Init();
213 if (RT_SUCCESS(rc))
214 {
215 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
216 rc = HMR0Init();
217 if (RT_SUCCESS(rc))
218 {
219 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
220
221 PDMR0Init(hMod);
222 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
223
224 rc = PGMRegisterStringFormatTypes();
225 if (RT_SUCCESS(rc))
226 {
227 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
228#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
229 rc = PGMR0DynMapInit();
230#endif
231 if (RT_SUCCESS(rc))
232 {
233 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
234 rc = IntNetR0Init();
235 if (RT_SUCCESS(rc))
236 {
237#ifdef VBOX_WITH_PCI_PASSTHROUGH
238 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
239 rc = PciRawR0Init();
240#endif
241 if (RT_SUCCESS(rc))
242 {
243 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
244 rc = CPUMR0ModuleInit();
245 if (RT_SUCCESS(rc))
246 {
247#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
248 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
249 rc = vmmR0TripleFaultHackInit();
250 if (RT_SUCCESS(rc))
251#endif
252 {
253 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
254 if (RT_SUCCESS(rc))
255 {
256 LogFlow(("ModuleInit: returns success\n"));
257 return VINF_SUCCESS;
258 }
259 }
260
261 /*
262 * Bail out.
263 */
264#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
265 vmmR0TripleFaultHackTerm();
266#endif
267 }
268 else
269 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
270#ifdef VBOX_WITH_PCI_PASSTHROUGH
271 PciRawR0Term();
272#endif
273 }
274 else
275 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
276 IntNetR0Term();
277 }
278 else
279 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
280#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
281 PGMR0DynMapTerm();
282#endif
283 }
284 else
285 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
286 PGMDeregisterStringFormatTypes();
287 }
288 else
289 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
290 HMR0Term();
291 }
292 else
293 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
294 GMMR0Term();
295 }
296 else
297 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
298 GVMMR0Term();
299 }
300 else
301 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
302 vmmTermFormatTypes();
303 }
304 else
305 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
306
307 LogFlow(("ModuleInit: failed %Rrc\n", rc));
308 return rc;
309}
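/* Note: each subsystem initialized above is torn down again in reverse order
   on the error paths, mirroring the full teardown performed by ModuleTerm
   below. */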
310
311
312/**
313 * Terminate the module.
314 * This is called when we're finally unloaded.
315 *
316 * @param hMod Image handle for use in APIs.
317 */
318DECLEXPORT(void) ModuleTerm(void *hMod)
319{
320 NOREF(hMod);
321 LogFlow(("ModuleTerm:\n"));
322
323 /*
324 * Terminate the CPUM module (Local APIC cleanup).
325 */
326 CPUMR0ModuleTerm();
327
328 /*
329 * Terminate the internal network service.
330 */
331 IntNetR0Term();
332
333 /*
334 * PGM (Darwin), HM and PciRaw global cleanup.
335 */
336#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
337 PGMR0DynMapTerm();
338#endif
339#ifdef VBOX_WITH_PCI_PASSTHROUGH
340 PciRawR0Term();
341#endif
342 PGMDeregisterStringFormatTypes();
343 HMR0Term();
344#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
345 vmmR0TripleFaultHackTerm();
346#endif
347
348 /*
349 * Destroy the GMM and GVMM instances.
350 */
351 GMMR0Term();
352 GVMMR0Term();
353
354 vmmTermFormatTypes();
355
356 LogFlow(("ModuleTerm: returns\n"));
357}
358
359
360/**
361 * Initiates the R0 driver for a particular VM instance.
362 *
363 * @returns VBox status code.
364 *
365 * @param pGVM The global (ring-0) VM structure.
366 * @param uSvnRev The SVN revision of the ring-3 part.
367 * @param uBuildType Build type indicator.
368 * @thread EMT(0)
369 */
370static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
371{
372 VMM_CHECK_SMAP_SETUP();
373 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
374
375 /*
376 * Match the SVN revisions and build type.
377 */
378 if (uSvnRev != VMMGetSvnRev())
379 {
380 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
381 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
382 return VERR_VMM_R0_VERSION_MISMATCH;
383 }
384 if (uBuildType != vmmGetBuildType())
385 {
386 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
387 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
388 return VERR_VMM_R0_VERSION_MISMATCH;
389 }
390
391 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
392 if (RT_FAILURE(rc))
393 return rc;
394
395#ifdef LOG_ENABLED
396 /*
397 * Register the EMT R0 logger instance for VCPU 0.
398 */
399 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
400
401 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
402 if (pR0Logger)
403 {
404# if 0 /* testing of the logger. */
405 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
406 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
407 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
408 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
409
410 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
411 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
412 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
413 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
414
415 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
416 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
417 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
418 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
419
420 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
421 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
422 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
423 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
424 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
425 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
426
427 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
428 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
429
430 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
431 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
432 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
433# endif
434 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pGVM->pSession));
435 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
436 pR0Logger->fRegistered = true;
437 }
438#endif /* LOG_ENABLED */
439
440 /*
441 * Check if the host supports high resolution timers or not.
442 */
443 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
444 && !RTTimerCanDoHighResolution())
445 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
446
447 /*
448 * Initialize the per VM data for GVMM and GMM.
449 */
450 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
451 rc = GVMMR0InitVM(pGVM);
452 if (RT_SUCCESS(rc))
453 {
454 /*
455 * Init HM, CPUM and PGM (Darwin only).
456 */
457 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
458 rc = HMR0InitVM(pGVM);
459 if (RT_SUCCESS(rc))
460 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */
461 if (RT_SUCCESS(rc))
462 {
463 rc = CPUMR0InitVM(pGVM);
464 if (RT_SUCCESS(rc))
465 {
466 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
467 rc = PGMR0InitVM(pGVM);
468 if (RT_SUCCESS(rc))
469 {
470 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
471 rc = EMR0InitVM(pGVM);
472 if (RT_SUCCESS(rc))
473 {
474 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
475#ifdef VBOX_WITH_PCI_PASSTHROUGH
476 rc = PciRawR0InitVM(pGVM);
477#endif
478 if (RT_SUCCESS(rc))
479 {
480 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
481 rc = GIMR0InitVM(pGVM);
482 if (RT_SUCCESS(rc))
483 {
484 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);
485 if (RT_SUCCESS(rc))
486 {
487 GVMMR0DoneInitVM(pGVM);
488
489 /*
490 * Collect a bit of info for the VM release log.
491 */
492 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
493 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
494
495 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
496 return rc;
497 }
498
499 /* bail out */
500 GIMR0TermVM(pGVM);
501 }
502#ifdef VBOX_WITH_PCI_PASSTHROUGH
503 PciRawR0TermVM(pGVM);
504#endif
505 }
506 }
507 }
508 }
509 HMR0TermVM(pGVM);
510 }
511 }
512
513 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
514 return rc;
515}
516
517
518/**
519 * Does EMT specific VM initialization.
520 *
521 * @returns VBox status code.
522 * @param pGVM The ring-0 VM structure.
523 * @param idCpu The EMT that's calling.
524 */
525static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
526{
527 /* Paranoia (caller checked these already). */
528 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
529 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
530
531#ifdef LOG_ENABLED
532 /*
533 * Registration of ring 0 loggers.
534 */
535 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
536 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
537 if ( pR0Logger
538 && !pR0Logger->fRegistered)
539 {
540 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
541 pR0Logger->fRegistered = true;
542 }
543#endif
544
545 return VINF_SUCCESS;
546}
547
548
549
550/**
551 * Terminates the R0 bits for a particular VM instance.
552 *
553 * This is normally called by ring-3 as part of the VM termination process, but
554 * may alternatively be called during the support driver session cleanup when
555 * the VM object is destroyed (see GVMM).
556 *
557 * @returns VBox status code.
558 *
559 * @param pGVM The global (ring-0) VM structure.
560 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
561 * thread.
562 * @thread EMT(0) or session clean up thread.
563 */
564VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
565{
566 /*
567 * Check EMT(0) claim if we're called from userland.
568 */
569 if (idCpu != NIL_VMCPUID)
570 {
571 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
572 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
573 if (RT_FAILURE(rc))
574 return rc;
575 }
576
577#ifdef VBOX_WITH_PCI_PASSTHROUGH
578 PciRawR0TermVM(pGVM);
579#endif
580
581 /*
582 * Tell GVMM what we're up to and check that we only do this once.
583 */
584 if (GVMMR0DoingTermVM(pGVM))
585 {
586 GIMR0TermVM(pGVM);
587
588 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
589 * here to make sure we don't leak any shared pages if we crash... */
590#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
591 PGMR0DynMapTermVM(pGVM);
592#endif
593 HMR0TermVM(pGVM);
594 }
595
596 /*
597 * Deregister the logger.
598 */
599 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
600 return VINF_SUCCESS;
601}
602
603
604/**
605 * An interrupt or unhalt force flag is set; deal with it.
606 *
607 * @returns VINF_SUCCESS (or VINF_EM_HALT).
608 * @param pVCpu The cross context virtual CPU structure.
609 * @param uMWait Result from EMMonitorWaitIsActive().
610 * @param enmInterruptibility Guest CPU interruptibility level.
611 */
612static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
613{
614 Assert(!TRPMHasTrap(pVCpu));
615 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
616 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
617
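 /* The branches below are ordered roughly by expected frequency: ordinary
    APIC/PIC interrupts first, then SMI (not implemented here), NMI,
    nested-guest virtual interrupts, and finally plain UNHALT/MWAIT wake-ups. */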
618 /*
619 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
620 */
621 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
622 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
623 {
624 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
625 {
626 uint8_t u8Interrupt = 0;
627 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
628 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
629 if (RT_SUCCESS(rc))
630 {
631 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
632
633 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
634 AssertRCSuccess(rc);
635 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
636 return rc;
637 }
638 }
639 }
640 /*
641 * SMI is not implemented yet, at least not here.
642 */
643 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
644 {
645 return VINF_EM_HALT;
646 }
647 /*
648 * NMI.
649 */
650 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
651 {
652 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
653 {
654 /** @todo later. */
655 return VINF_EM_HALT;
656 }
657 }
658 /*
659 * Nested-guest virtual interrupt.
660 */
661 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
662 {
663 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
664 {
665 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
666 * here before injecting the virtual interrupt. See emR3ForcedActions
667 * for details. */
668 return VINF_EM_HALT;
669 }
670 }
671
672 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
673 {
674 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
675 return VINF_SUCCESS;
676 }
677 if (uMWait > 1)
678 {
679 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
680 return VINF_SUCCESS;
681 }
682
683 return VINF_EM_HALT;
684}
685
686
687/**
688 * This does one round of vmR3HaltGlobal1Halt().
689 *
690 * The rationale here is that we'll reduce latency in interrupt situations if we
691 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
692 * MWAIT), but do one round of blocking here instead and hope the interrupt is
693 * raised in the meanwhile.
694 *
695 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
696 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
697 * ring-0 call (unless we're too close to a timer event). When the interrupt
698 * wakes us up, we'll return from ring-0 and EM will by instinct do a
699 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
700 * back to VMMR0EntryFast().
701 *
702 * @returns VINF_SUCCESS or VINF_EM_HALT.
703 * @param pGVM The ring-0 VM structure.
704 * @param pGVCpu The ring-0 virtual CPU structure.
705 *
706 * @todo r=bird: All the blocking/waiting and EMT management should move out of
707 * the VM module, probably to VMM. Then this would be more weird wrt
708 * parameters and statistics.
709 */
710static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
711{
712 /*
713 * Do spin stat historization.
714 */
715 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
716 { /* likely */ }
717 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
718 {
719 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
720 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
721 }
722 else
723 {
724 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
725 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
726 }
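 /* Roughly: every 256 halts the two counters above are rebiased towards
    whichever outcome has dominated (halt handled in ring-0 vs. went to
    ring-3); the spin loop further down only runs while cR0HaltsSucceeded
    exceeds cR0HaltsToRing3 and at least four CPUs are online. */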
727
728 /*
729 * Flags that make us go to ring-3.
730 */
731 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
732 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
733 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
734 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
735 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
736 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
737 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
738 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
739
740 /*
741 * Check preconditions.
742 */
743 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
744 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
745 if ( pGVCpu->vmm.s.fMayHaltInRing0
746 && !TRPMHasTrap(pGVCpu)
747 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
748 || uMWait > 1))
749 {
750 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
751 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
752 {
753 /*
754 * Interrupts pending already?
755 */
756 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
757 APICUpdatePendingInterrupts(pGVCpu);
758
759 /*
760 * Flags that wake up from the halted state.
761 */
762 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
763 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
764
765 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
766 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
767 ASMNopPause();
768
769 /*
770 * Check out how long till the next timer event.
771 */
772 uint64_t u64Delta;
773 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
774
775 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
776 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
777 {
778 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
779 APICUpdatePendingInterrupts(pGVCpu);
780
781 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
782 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
783
784 /*
785 * Wait if there is enough time to the next timer event.
786 */
787 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
788 {
789 /* If there are a few other CPU cores around, we will procrastinate a
790 little before going to sleep, hoping for some device raising an
791 interrupt or similar. Though, the best thing here would be to
792 dynamically adjust the spin count according to its usefulness or
793 something... */
794 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
795 && RTMpGetOnlineCount() >= 4)
796 {
797 /** @todo Figure out how we can skip this if it hasn't helped recently...
798 * @bugref{9172#c12} */
799 uint32_t cSpinLoops = 42;
800 while (cSpinLoops-- > 0)
801 {
802 ASMNopPause();
803 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
804 APICUpdatePendingInterrupts(pGVCpu);
805 ASMNopPause();
806 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
807 {
808 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
809 return VINF_EM_HALT;
810 }
811 ASMNopPause();
812 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
813 {
814 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
815 return VINF_EM_HALT;
816 }
817 ASMNopPause();
818 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
819 {
820 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
821 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
822 }
823 ASMNopPause();
824 }
825 }
826
827 /* Block. We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
828 knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here). */
829 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);
830 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
831 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
832 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
833 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
834 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
835 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
836 if ( rc == VINF_SUCCESS
837 || rc == VERR_INTERRUPTED)
838
839 {
840 /* Keep some stats like ring-3 does. */
841 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
842 if (cNsOverslept > 50000)
843 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
844 else if (cNsOverslept < -50000)
845 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
846 else
847 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
848
849 /*
850 * Recheck whether we can resume execution or have to go to ring-3.
851 */
852 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
853 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
854 {
855 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
856 APICUpdatePendingInterrupts(pGVCpu);
857 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
858 {
859 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
860 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
861 }
862 }
863 }
864 }
865 }
866 }
867 }
868 return VINF_EM_HALT;
869}
870
871
872/**
873 * VMM ring-0 thread-context callback.
874 *
875 * This does common HM state updating and calls the HM-specific thread-context
876 * callback.
877 *
878 * @param enmEvent The thread-context event.
879 * @param pvUser Opaque pointer to the VMCPU.
880 *
881 * @thread EMT(pvUser)
882 */
883static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
884{
885 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
886
887 switch (enmEvent)
888 {
889 case RTTHREADCTXEVENT_IN:
890 {
891 /*
892 * Linux may call us with preemption enabled (really!) but technically we
893 * cannot get preempted here, otherwise we end up in an infinite recursion
894 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
895 * ad infinitum). Let's just disable preemption for now...
896 */
897 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
898 * preemption after doing the callout (one or two functions up the
899 * call chain). */
900 /** @todo r=ramshankar: See @bugref{5313#c30}. */
901 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
902 RTThreadPreemptDisable(&ParanoidPreemptState);
903
904 /* We need to update the VCPU <-> host CPU mapping. */
905 RTCPUID idHostCpu;
906 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
907 pVCpu->iHostCpuSet = iHostCpuSet;
908 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
909
910 /* In the very unlikely event that the GIP delta for the CPU we're
911 rescheduled to needs calculating, try to force a return to ring-3.
912 We unfortunately cannot do the measurements right here. */
913 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
914 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
915
916 /* Invoke the HM-specific thread-context callback. */
917 HMR0ThreadCtxCallback(enmEvent, pvUser);
918
919 /* Restore preemption. */
920 RTThreadPreemptRestore(&ParanoidPreemptState);
921 break;
922 }
923
924 case RTTHREADCTXEVENT_OUT:
925 {
926 /* Invoke the HM-specific thread-context callback. */
927 HMR0ThreadCtxCallback(enmEvent, pvUser);
928
929 /*
930 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
931 * have the same host CPU associated with them.
932 */
933 pVCpu->iHostCpuSet = UINT32_MAX;
934 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
935 break;
936 }
937
938 default:
939 /* Invoke the HM-specific thread-context callback. */
940 HMR0ThreadCtxCallback(enmEvent, pvUser);
941 break;
942 }
943}
944
945
946/**
947 * Creates thread switching hook for the current EMT thread.
948 *
949 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
950 * platform does not implement switcher hooks, no hooks will be created and the
951 * member is set to NIL_RTTHREADCTXHOOK.
952 *
953 * @returns VBox status code.
954 * @param pVCpu The cross context virtual CPU structure.
955 * @thread EMT(pVCpu)
956 */
957VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
958{
959 VMCPU_ASSERT_EMT(pVCpu);
960 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
961
962#if 1 /* To disable this stuff change to zero. */
963 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
964 if (RT_SUCCESS(rc))
965 return rc;
966#else
967 RT_NOREF(vmmR0ThreadCtxCallback);
968 int rc = VERR_NOT_SUPPORTED;
969#endif
970
971 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
972 if (rc == VERR_NOT_SUPPORTED)
973 return VINF_SUCCESS;
974
975 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
976 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
977}
978
979
980/**
981 * Destroys the thread switching hook for the specified VCPU.
982 *
983 * @param pVCpu The cross context virtual CPU structure.
984 * @remarks Can be called from any thread.
985 */
986VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
987{
988 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
989 AssertRC(rc);
990 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
991}
992
993
994/**
995 * Disables the thread switching hook for this VCPU (if we got one).
996 *
997 * @param pVCpu The cross context virtual CPU structure.
998 * @thread EMT(pVCpu)
999 *
1000 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
1001 * this call. This means you have to be careful with what you do!
1002 */
1003VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1004{
1005 /*
1006 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1007 * @bugref{7726#c19} explains the need for this trick:
1008 *
1009 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1010 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
1011 * longjmp & normal return to ring-3, which opens a window where we may be
1012 * rescheduled without changing VMCPUID::idHostCpu and cause confusion if
1013 * the CPU starts executing a different EMT. Both functions first disable
1014 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1015 * an opening for getting preempted.
1016 */
1017 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1018 * all the time. */
1019 /** @todo move this into the context hook disabling if(). */
1020 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1021
1022 /*
1023 * Disable the context hook, if we got one.
1024 */
1025 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1026 {
1027 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1028 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1029 AssertRC(rc);
1030 }
1031}
1032
1033
1034/**
1035 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
1036 *
1037 * @returns true if registered, false otherwise.
1038 * @param pVCpu The cross context virtual CPU structure.
1039 */
1040DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1041{
1042 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
1043}
1044
1045
1046/**
1047 * Whether thread-context hooks are registered for this VCPU.
1048 *
1049 * @returns true if registered, false otherwise.
1050 * @param pVCpu The cross context virtual CPU structure.
1051 */
1052VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1053{
1054 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1055}
1056
1057
1058#ifdef VBOX_WITH_STATISTICS
1059/**
1060 * Record return code statistics
1061 * @param pVM The cross context VM structure.
1062 * @param pVCpu The cross context virtual CPU structure.
1063 * @param rc The status code.
1064 */
1065static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1066{
1067 /*
1068 * Collect statistics.
1069 */
1070 switch (rc)
1071 {
1072 case VINF_SUCCESS:
1073 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1074 break;
1075 case VINF_EM_RAW_INTERRUPT:
1076 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1077 break;
1078 case VINF_EM_RAW_INTERRUPT_HYPER:
1079 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1080 break;
1081 case VINF_EM_RAW_GUEST_TRAP:
1082 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1083 break;
1084 case VINF_EM_RAW_RING_SWITCH:
1085 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1086 break;
1087 case VINF_EM_RAW_RING_SWITCH_INT:
1088 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1089 break;
1090 case VINF_EM_RAW_STALE_SELECTOR:
1091 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1092 break;
1093 case VINF_EM_RAW_IRET_TRAP:
1094 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1095 break;
1096 case VINF_IOM_R3_IOPORT_READ:
1097 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1098 break;
1099 case VINF_IOM_R3_IOPORT_WRITE:
1100 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1101 break;
1102 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1103 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1104 break;
1105 case VINF_IOM_R3_MMIO_READ:
1106 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1107 break;
1108 case VINF_IOM_R3_MMIO_WRITE:
1109 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1110 break;
1111 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1112 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1113 break;
1114 case VINF_IOM_R3_MMIO_READ_WRITE:
1115 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1116 break;
1117 case VINF_PATM_HC_MMIO_PATCH_READ:
1118 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1119 break;
1120 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1121 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1122 break;
1123 case VINF_CPUM_R3_MSR_READ:
1124 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1125 break;
1126 case VINF_CPUM_R3_MSR_WRITE:
1127 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1128 break;
1129 case VINF_EM_RAW_EMULATE_INSTR:
1130 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1131 break;
1132 case VINF_PATCH_EMULATE_INSTR:
1133 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1134 break;
1135 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1136 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1137 break;
1138 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1139 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1140 break;
1141 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1142 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1143 break;
1144 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1145 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1146 break;
1147 case VINF_CSAM_PENDING_ACTION:
1148 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1149 break;
1150 case VINF_PGM_SYNC_CR3:
1151 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1152 break;
1153 case VINF_PATM_PATCH_INT3:
1154 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1155 break;
1156 case VINF_PATM_PATCH_TRAP_PF:
1157 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1158 break;
1159 case VINF_PATM_PATCH_TRAP_GP:
1160 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1161 break;
1162 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1163 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1164 break;
1165 case VINF_EM_RESCHEDULE_REM:
1166 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1167 break;
1168 case VINF_EM_RAW_TO_R3:
1169 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1170 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1171 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1172 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1173 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1174 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1175 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1176 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1177 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1178 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1179 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1180 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1181 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1182 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1183 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1184 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1185 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1186 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1187 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1188 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1189 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1190 else
1191 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1192 break;
1193
1194 case VINF_EM_RAW_TIMER_PENDING:
1195 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1196 break;
1197 case VINF_EM_RAW_INTERRUPT_PENDING:
1198 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1199 break;
1200 case VINF_VMM_CALL_HOST:
1201 switch (pVCpu->vmm.s.enmCallRing3Operation)
1202 {
1203 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1204 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1205 break;
1206 case VMMCALLRING3_PDM_LOCK:
1207 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1208 break;
1209 case VMMCALLRING3_PGM_POOL_GROW:
1210 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1211 break;
1212 case VMMCALLRING3_PGM_LOCK:
1213 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1214 break;
1215 case VMMCALLRING3_PGM_MAP_CHUNK:
1216 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1217 break;
1218 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1219 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1220 break;
1221 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1222 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1223 break;
1224 case VMMCALLRING3_VM_SET_ERROR:
1225 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1226 break;
1227 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1228 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1229 break;
1230 case VMMCALLRING3_VM_R0_ASSERTION:
1231 default:
1232 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1233 break;
1234 }
1235 break;
1236 case VINF_PATM_DUPLICATE_FUNCTION:
1237 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1238 break;
1239 case VINF_PGM_CHANGE_MODE:
1240 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1241 break;
1242 case VINF_PGM_POOL_FLUSH_PENDING:
1243 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1244 break;
1245 case VINF_EM_PENDING_REQUEST:
1246 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1247 break;
1248 case VINF_EM_HM_PATCH_TPR_INSTR:
1249 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1250 break;
1251 default:
1252 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1253 break;
1254 }
1255}
1256#endif /* VBOX_WITH_STATISTICS */
1257
1258
1259/**
1260 * The Ring 0 entry point, called by the fast-ioctl path.
1261 *
1262 * @param pGVM The global (ring-0) VM structure.
1263 * @param pVMIgnored The cross context VM structure. The return code is
1264 * stored in pVM->vmm.s.iLastGZRc.
1265 * @param idCpu The Virtual CPU ID of the calling EMT.
1266 * @param enmOperation Which operation to execute.
1267 * @remarks Assume called with interrupts _enabled_.
1268 */
1269VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1270{
1271 RT_NOREF(pVMIgnored);
1272
1273 /*
1274 * Validation.
1275 */
1276 if ( idCpu < pGVM->cCpus
1277 && pGVM->cCpus == pGVM->cCpusUnsafe)
1278 { /*likely*/ }
1279 else
1280 {
1281 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1282 return;
1283 }
1284
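 /* Resolve the ring-0 VCPU structure and verify that the calling thread
    really is the EMT registered for it. */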
1285 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1286 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1287 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1288 && pGVCpu->hNativeThreadR0 == hNativeThread))
1289 { /* likely */ }
1290 else
1291 {
1292 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1293 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1294 return;
1295 }
1296
1297 /*
1298 * SMAP fun.
1299 */
1300 VMM_CHECK_SMAP_SETUP();
1301 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1302
1303 /*
1304 * Perform requested operation.
1305 */
1306 switch (enmOperation)
1307 {
1308 /*
1309 * Run guest code using the available hardware acceleration technology.
1310 */
1311 case VMMR0_DO_HM_RUN:
1312 {
1313 for (;;) /* hlt loop */
1314 {
1315 /*
1316 * Disable preemption.
1317 */
1318 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1319 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1320 RTThreadPreemptDisable(&PreemptState);
1321
1322 /*
1323 * Get the host CPU identifiers, make sure they are valid and that
1324 * we've got a TSC delta for the CPU.
1325 */
1326 RTCPUID idHostCpu;
1327 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1328 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1329 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1330 {
1331 pGVCpu->iHostCpuSet = iHostCpuSet;
1332 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1333
1334 /*
1335 * Update the periodic preemption timer if it's active.
1336 */
1337 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1338 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1339 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1340
1341#ifdef VMM_R0_TOUCH_FPU
1342 /*
1343 * Make sure we've got the FPU state loaded so we don't need to clear
1344 * CR0.TS and get out of sync with the host kernel when loading the guest
1345 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1346 */
1347 CPUMR0TouchHostFpu();
1348#endif
1349 int rc;
1350 bool fPreemptRestored = false;
1351 if (!HMR0SuspendPending())
1352 {
1353 /*
1354 * Enable the context switching hook.
1355 */
1356 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1357 {
1358 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmm.s.hCtxHook));
1359 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1360 }
1361
1362 /*
1363 * Enter HM context.
1364 */
1365 rc = HMR0Enter(pGVCpu);
1366 if (RT_SUCCESS(rc))
1367 {
1368 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1369
1370 /*
1371 * When preemption hooks are in place, enable preemption now that
1372 * we're in HM context.
1373 */
1374 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1375 {
1376 fPreemptRestored = true;
1377 RTThreadPreemptRestore(&PreemptState);
1378 }
1379
1380 /*
1381 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1382 */
1383 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1384 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
1385 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1386
1387 /*
1388 * Assert sanity on the way out. Using manual assertion code here as normal
1389 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1390 */
1391 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1392 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1393 {
1394 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1395 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1396 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1397 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1398 }
1399 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1400 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1401 {
1402 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1403 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1404 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1405 rc = VERR_INVALID_STATE;
1406 }
1407
1408 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1409 }
1410 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1411
1412 /*
1413 * Invalidate the host CPU identifiers before we disable the context
1414 * hook / restore preemption.
1415 */
1416 pGVCpu->iHostCpuSet = UINT32_MAX;
1417 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1418
1419 /*
1420 * Disable context hooks. Due to unresolved cleanup issues, we
1421 * cannot leave the hooks enabled when we return to ring-3.
1422 *
1423 * Note! At the moment HM may also have disabled the hook
1424 * when we get here, but the IPRT API handles that.
1425 */
1426 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1427 {
1428 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1429 RTThreadCtxHookDisable(pGVCpu->vmm.s.hCtxHook);
1430 }
1431 }
1432 /*
1433 * The system is about to go into suspend mode; go back to ring 3.
1434 */
1435 else
1436 {
1437 rc = VINF_EM_RAW_INTERRUPT;
1438 pGVCpu->iHostCpuSet = UINT32_MAX;
1439 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1440 }
1441
1442 /** @todo When HM stops messing with the context hook state, we'll disable
1443 * preemption again before the RTThreadCtxHookDisable call. */
1444 if (!fPreemptRestored)
1445 RTThreadPreemptRestore(&PreemptState);
1446
1447 pGVCpu->vmm.s.iLastGZRc = rc;
1448
1449 /* Fire dtrace probe and collect statistics. */
1450 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1451#ifdef VBOX_WITH_STATISTICS
1452 vmmR0RecordRC(pGVM, pGVCpu, rc);
1453#endif
1454#if 1
1455 /*
1456 * If this is a halt, try to handle it here in ring-0 before going back to ring-3.
1457 */
1458 if (rc != VINF_EM_HALT)
1459 { /* we're not in a hurry for a HLT, so prefer this path */ }
1460 else
1461 {
1462 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1463 if (rc == VINF_SUCCESS)
1464 {
1465 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1466 continue;
1467 }
1468 pGVCpu->vmm.s.cR0HaltsToRing3++;
1469 }
1470#endif
1471 }
1472 /*
1473 * Invalid CPU set index or TSC delta in need of measuring.
1474 */
1475 else
1476 {
1477 pGVCpu->iHostCpuSet = UINT32_MAX;
1478 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1479 RTThreadPreemptRestore(&PreemptState);
1480 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1481 {
1482 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1483 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1484 0 /*default cTries*/);
1485 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1486 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1487 else
1488 pGVCpu->vmm.s.iLastGZRc = rc;
1489 }
1490 else
1491 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1492 }
1493 break;
1494
1495 } /* halt loop. */
1496 break;
1497 }
1498
1499#ifdef VBOX_WITH_NEM_R0
1500# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1501 case VMMR0_DO_NEM_RUN:
1502 {
1503 /*
1504 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1505 */
1506 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1507# ifdef VBOXSTRICTRC_STRICT_ENABLED
1508 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1509# else
1510 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1511# endif
1512 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1513 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1514
1515 pGVCpu->vmm.s.iLastGZRc = rc;
1516
1517 /*
1518 * Fire dtrace probe and collect statistics.
1519 */
1520 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1521# ifdef VBOX_WITH_STATISTICS
1522 vmmR0RecordRC(pGVM, pGVCpu, rc);
1523# endif
1524 break;
1525 }
1526# endif
1527#endif
1528
1529 /*
1530 * For profiling.
1531 */
1532 case VMMR0_DO_NOP:
1533 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1534 break;
1535
1536 /*
1537 * Shouldn't happen.
1538 */
1539 default:
1540 AssertMsgFailed(("%#x\n", enmOperation));
1541 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1542 break;
1543 }
1544 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1545}
1546
1547
1548/**
1549 * Validates a session or VM session argument.
1550 *
1551 * @returns true / false accordingly.
1552 * @param pGVM The global (ring-0) VM structure.
1553 * @param pClaimedSession The session claim to validate.
1554 * @param pSession The session argument.
1555 */
1556DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1557{
1558 /* This must be set! */
1559 if (!pSession)
1560 return false;
1561
1562 /* Only one out of the two. */
1563 if (pGVM && pClaimedSession)
1564 return false;
1565 if (pGVM)
1566 pClaimedSession = pGVM->pSession;
1567 return pClaimedSession == pSession;
1568}
1569
1570
1571/**
1572 * VMMR0EntryEx worker function, either called directly or whenever possible
1573 * called through a longjmp so we can exit safely on failure.
1574 *
1575 * @returns VBox status code.
1576 * @param pGVM The global (ring-0) VM structure.
1577 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1578 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1579 * @param enmOperation Which operation to execute.
1580 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1581 * The support driver validates this if it's present.
1582 * @param u64Arg Some simple constant argument.
1583 * @param pSession The session of the caller.
1584 *
1585 * @remarks Assume called with interrupts _enabled_.
1586 */
1587static int vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1588 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1589{
1590 /*
1591 * Validate pGVM and idCpu for consistency and validity.
1592 */
1593 if (pGVM != NULL)
1594 {
1595 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
1596 { /* likely */ }
1597 else
1598 {
1599 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1600 return VERR_INVALID_POINTER;
1601 }
1602
1603 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1604 { /* likely */ }
1605 else
1606 {
1607 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1608 return VERR_INVALID_PARAMETER;
1609 }
1610
1611 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1612 && pGVM->enmVMState <= VMSTATE_TERMINATED
1613 && pGVM->pSession == pSession
1614 && pGVM->pSelf == pGVM))
1615 { /* likely */ }
1616 else
1617 {
1618 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1619 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1620 return VERR_INVALID_POINTER;
1621 }
1622 }
1623 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1624 { /* likely */ }
1625 else
1626 {
1627 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1628 return VERR_INVALID_PARAMETER;
1629 }
1630
1631 /*
1632 * SMAP fun.
1633 */
1634 VMM_CHECK_SMAP_SETUP();
1635 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1636
1637 /*
1638 * Process the request.
1639 */
1640 int rc;
1641 switch (enmOperation)
1642 {
1643 /*
1644 * GVM requests
1645 */
1646 case VMMR0_DO_GVMM_CREATE_VM:
1647 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1648 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1649 else
1650 rc = VERR_INVALID_PARAMETER;
1651 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1652 break;
1653
1654 case VMMR0_DO_GVMM_DESTROY_VM:
1655 if (pReqHdr == NULL && u64Arg == 0)
1656 rc = GVMMR0DestroyVM(pGVM);
1657 else
1658 rc = VERR_INVALID_PARAMETER;
1659 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1660 break;
1661
1662 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1663 if (pGVM != NULL)
1664 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1665 else
1666 rc = VERR_INVALID_PARAMETER;
1667 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1668 break;
1669
1670 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1671 if (pGVM != NULL)
1672 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1673 else
1674 rc = VERR_INVALID_PARAMETER;
1675 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1676 break;
1677
1678 case VMMR0_DO_GVMM_SCHED_HALT:
1679 if (pReqHdr)
1680 return VERR_INVALID_PARAMETER;
1681 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1682 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1683 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1684 break;
1685
1686 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1687 if (pReqHdr || u64Arg)
1688 return VERR_INVALID_PARAMETER;
1689 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1690 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1691 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1692 break;
1693
1694 case VMMR0_DO_GVMM_SCHED_POKE:
1695 if (pReqHdr || u64Arg)
1696 return VERR_INVALID_PARAMETER;
1697 rc = GVMMR0SchedPoke(pGVM, idCpu);
1698 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1699 break;
1700
1701 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1702 if (u64Arg)
1703 return VERR_INVALID_PARAMETER;
1704 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1705 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1706 break;
1707
1708 case VMMR0_DO_GVMM_SCHED_POLL:
1709 if (pReqHdr || u64Arg > 1)
1710 return VERR_INVALID_PARAMETER;
1711 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1712 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1713 break;
1714
1715 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1716 if (u64Arg)
1717 return VERR_INVALID_PARAMETER;
1718 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1719 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1720 break;
1721
1722 case VMMR0_DO_GVMM_RESET_STATISTICS:
1723 if (u64Arg)
1724 return VERR_INVALID_PARAMETER;
1725 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1726 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1727 break;
1728
1729 /*
1730 * Initialize the R0 part of a VM instance.
1731 */
1732 case VMMR0_DO_VMMR0_INIT:
1733 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1734 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1735 break;
1736
1737 /*
1738 * Does EMT specific ring-0 init.
1739 */
1740 case VMMR0_DO_VMMR0_INIT_EMT:
1741 rc = vmmR0InitVMEmt(pGVM, idCpu);
1742 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1743 break;
1744
1745 /*
1746 * Terminate the R0 part of a VM instance.
1747 */
1748 case VMMR0_DO_VMMR0_TERM:
1749 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1750 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1751 break;
1752
1753 /*
1754 * Attempt to enable HM mode and check the current setting.
1755 */
1756 case VMMR0_DO_HM_ENABLE:
1757 rc = HMR0EnableAllCpus(pGVM);
1758 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1759 break;
1760
1761 /*
1762 * Setup the hardware accelerated session.
1763 */
1764 case VMMR0_DO_HM_SETUP_VM:
1765 rc = HMR0SetupVM(pGVM);
1766 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1767 break;
1768
1769 /*
1770 * PGM wrappers.
1771 */
1772 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1773 if (idCpu == NIL_VMCPUID)
1774 return VERR_INVALID_CPU_ID;
1775 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
1776 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1777 break;
1778
1779 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1780 if (idCpu == NIL_VMCPUID)
1781 return VERR_INVALID_CPU_ID;
1782 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
1783 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1784 break;
1785
1786 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1787 if (idCpu == NIL_VMCPUID)
1788 return VERR_INVALID_CPU_ID;
1789 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, idCpu);
1790 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1791 break;
1792
1793 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1794 if (idCpu != 0)
1795 return VERR_INVALID_CPU_ID;
1796 rc = PGMR0PhysSetupIoMmu(pGVM);
1797 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1798 break;
1799
1800 case VMMR0_DO_PGM_POOL_GROW:
1801 if (idCpu == NIL_VMCPUID)
1802 return VERR_INVALID_CPU_ID;
1803 rc = PGMR0PoolGrow(pGVM);
1804 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1805 break;
1806
1807 /*
1808 * GMM wrappers.
1809 */
1810 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1811 if (u64Arg)
1812 return VERR_INVALID_PARAMETER;
1813 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1814 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1815 break;
1816
1817 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1818 if (u64Arg)
1819 return VERR_INVALID_PARAMETER;
1820 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1821 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1822 break;
1823
1824 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1825 if (u64Arg)
1826 return VERR_INVALID_PARAMETER;
1827 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1828 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1829 break;
1830
1831 case VMMR0_DO_GMM_FREE_PAGES:
1832 if (u64Arg)
1833 return VERR_INVALID_PARAMETER;
1834 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1835 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1836 break;
1837
1838 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1839 if (u64Arg)
1840 return VERR_INVALID_PARAMETER;
1841 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1842 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1843 break;
1844
1845 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1846 if (u64Arg)
1847 return VERR_INVALID_PARAMETER;
1848 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1849 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1850 break;
1851
1852 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1853 if (idCpu == NIL_VMCPUID)
1854 return VERR_INVALID_CPU_ID;
1855 if (u64Arg)
1856 return VERR_INVALID_PARAMETER;
1857 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1858 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1859 break;
1860
1861 case VMMR0_DO_GMM_BALLOONED_PAGES:
1862 if (u64Arg)
1863 return VERR_INVALID_PARAMETER;
1864 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1865 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1866 break;
1867
1868 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1869 if (u64Arg)
1870 return VERR_INVALID_PARAMETER;
1871 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1872 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1873 break;
1874
1875 case VMMR0_DO_GMM_SEED_CHUNK:
1876 if (pReqHdr)
1877 return VERR_INVALID_PARAMETER;
1878 rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
1879 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1880 break;
1881
1882 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1883 if (idCpu == NIL_VMCPUID)
1884 return VERR_INVALID_CPU_ID;
1885 if (u64Arg)
1886 return VERR_INVALID_PARAMETER;
1887 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1888 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1889 break;
1890
1891 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1892 if (idCpu == NIL_VMCPUID)
1893 return VERR_INVALID_CPU_ID;
1894 if (u64Arg)
1895 return VERR_INVALID_PARAMETER;
1896 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1897 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1898 break;
1899
1900 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1901 if (idCpu == NIL_VMCPUID)
1902 return VERR_INVALID_CPU_ID;
1903 if ( u64Arg
1904 || pReqHdr)
1905 return VERR_INVALID_PARAMETER;
1906 rc = GMMR0ResetSharedModules(pGVM, idCpu);
1907 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1908 break;
1909
1910#ifdef VBOX_WITH_PAGE_SHARING
1911 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1912 {
1913 if (idCpu == NIL_VMCPUID)
1914 return VERR_INVALID_CPU_ID;
1915 if ( u64Arg
1916 || pReqHdr)
1917 return VERR_INVALID_PARAMETER;
1918 rc = GMMR0CheckSharedModules(pGVM, idCpu);
1919 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1920 break;
1921 }
1922#endif
1923
1924#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1925 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1926 if (u64Arg)
1927 return VERR_INVALID_PARAMETER;
1928 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1929 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1930 break;
1931#endif
1932
1933 case VMMR0_DO_GMM_QUERY_STATISTICS:
1934 if (u64Arg)
1935 return VERR_INVALID_PARAMETER;
1936 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1937 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1938 break;
1939
1940 case VMMR0_DO_GMM_RESET_STATISTICS:
1941 if (u64Arg)
1942 return VERR_INVALID_PARAMETER;
1943 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1944 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1945 break;
1946
1947 /*
1948 * A quick GCFGM mock-up.
1949 */
1950 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1951 case VMMR0_DO_GCFGM_SET_VALUE:
1952 case VMMR0_DO_GCFGM_QUERY_VALUE:
1953 {
1954 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1955 return VERR_INVALID_PARAMETER;
1956 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1957 if (pReq->Hdr.cbReq != sizeof(*pReq))
1958 return VERR_INVALID_PARAMETER;
1959 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1960 {
1961 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1962 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1963 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1964 }
1965 else
1966 {
1967 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1968 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1969 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1970 }
1971 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1972 break;
1973 }
1974
1975 /*
1976 * PDM Wrappers.
1977 */
1978 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1979 {
1980 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1981 return VERR_INVALID_PARAMETER;
1982 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1983 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1984 break;
1985 }
1986
1987 case VMMR0_DO_PDM_DEVICE_CREATE:
1988 {
1989 if (!pReqHdr || u64Arg || idCpu != 0)
1990 return VERR_INVALID_PARAMETER;
1991 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
1992 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1993 break;
1994 }
1995
1996 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
1997 {
1998 if (!pReqHdr || u64Arg)
1999 return VERR_INVALID_PARAMETER;
2000 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2001 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2002 break;
2003 }
2004
2005 /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2006 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2007 {
2008 if (!pReqHdr || u64Arg || idCpu != 0)
2009 return VERR_INVALID_PARAMETER;
2010 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2011 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2012 break;
2013 }
2014
2015 /*
2016 * Requests to the internal networking service.
2017 */
2018 case VMMR0_DO_INTNET_OPEN:
2019 {
2020 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2021 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2022 return VERR_INVALID_PARAMETER;
2023 rc = IntNetR0OpenReq(pSession, pReq);
2024 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2025 break;
2026 }
2027
2028 case VMMR0_DO_INTNET_IF_CLOSE:
2029 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2030 return VERR_INVALID_PARAMETER;
2031 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2032 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2033 break;
2034
2035
2036 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2037 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2038 return VERR_INVALID_PARAMETER;
2039 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2040 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2041 break;
2042
2043 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2044 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2045 return VERR_INVALID_PARAMETER;
2046 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2047 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2048 break;
2049
2050 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2051 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2052 return VERR_INVALID_PARAMETER;
2053 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2054 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2055 break;
2056
2057 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2058 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2059 return VERR_INVALID_PARAMETER;
2060 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2061 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2062 break;
2063
2064 case VMMR0_DO_INTNET_IF_SEND:
2065 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2066 return VERR_INVALID_PARAMETER;
2067 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2068 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2069 break;
2070
2071 case VMMR0_DO_INTNET_IF_WAIT:
2072 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2073 return VERR_INVALID_PARAMETER;
2074 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2075 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2076 break;
2077
2078 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2079 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2080 return VERR_INVALID_PARAMETER;
2081 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2082 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2083 break;
2084
2085#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2086 /*
2087 * Requests to host PCI driver service.
2088 */
2089 case VMMR0_DO_PCIRAW_REQ:
2090 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2091 return VERR_INVALID_PARAMETER;
2092 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2093 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2094 break;
2095#endif
2096
2097 /*
2098 * NEM requests.
2099 */
2100#ifdef VBOX_WITH_NEM_R0
2101# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2102 case VMMR0_DO_NEM_INIT_VM:
2103 if (u64Arg || pReqHdr || idCpu != 0)
2104 return VERR_INVALID_PARAMETER;
2105 rc = NEMR0InitVM(pGVM);
2106 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2107 break;
2108
2109 case VMMR0_DO_NEM_INIT_VM_PART_2:
2110 if (u64Arg || pReqHdr || idCpu != 0)
2111 return VERR_INVALID_PARAMETER;
2112 rc = NEMR0InitVMPart2(pGVM);
2113 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2114 break;
2115
2116 case VMMR0_DO_NEM_MAP_PAGES:
2117 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2118 return VERR_INVALID_PARAMETER;
2119 rc = NEMR0MapPages(pGVM, idCpu);
2120 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2121 break;
2122
2123 case VMMR0_DO_NEM_UNMAP_PAGES:
2124 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2125 return VERR_INVALID_PARAMETER;
2126 rc = NEMR0UnmapPages(pGVM, idCpu);
2127 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2128 break;
2129
2130 case VMMR0_DO_NEM_EXPORT_STATE:
2131 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2132 return VERR_INVALID_PARAMETER;
2133 rc = NEMR0ExportState(pGVM, idCpu);
2134 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2135 break;
2136
2137 case VMMR0_DO_NEM_IMPORT_STATE:
2138 if (pReqHdr || idCpu == NIL_VMCPUID)
2139 return VERR_INVALID_PARAMETER;
2140 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2141 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2142 break;
2143
2144 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2145 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2146 return VERR_INVALID_PARAMETER;
2147 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2148 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2149 break;
2150
2151 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2152 if (pReqHdr || idCpu == NIL_VMCPUID)
2153 return VERR_INVALID_PARAMETER;
2154 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2155 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2156 break;
2157
2158 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2159 if (u64Arg || pReqHdr)
2160 return VERR_INVALID_PARAMETER;
2161 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2162 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2163 break;
2164
2165# if 1 && defined(DEBUG_bird)
2166 case VMMR0_DO_NEM_EXPERIMENT:
2167 if (pReqHdr)
2168 return VERR_INVALID_PARAMETER;
2169 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2170 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2171 break;
2172# endif
2173# endif
2174#endif
2175
2176 /*
2177 * IOM requests.
2178 */
2179 case VMMR0_DO_IOM_GROW_IO_PORTS:
2180 {
2181 if (pReqHdr || idCpu != 0)
2182 return VERR_INVALID_PARAMETER;
2183 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2184 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2185 break;
2186 }
2187
2188 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2189 {
2190 if (pReqHdr || idCpu != 0)
2191 return VERR_INVALID_PARAMETER;
2192 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2193 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2194 break;
2195 }
2196
2197 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2198 {
2199 if (pReqHdr || idCpu != 0)
2200 return VERR_INVALID_PARAMETER;
2201 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2202 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2203 break;
2204 }
2205
2206 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2207 {
2208 if (pReqHdr || idCpu != 0)
2209 return VERR_INVALID_PARAMETER;
2210 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2211 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2212 break;
2213 }
2214
2215 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2216 {
2217 if (pReqHdr || idCpu != 0)
2218 return VERR_INVALID_PARAMETER;
2219 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2220 if (RT_SUCCESS(rc))
2221 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2222 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2223 break;
2224 }
2225
2226 /*
2227 * For profiling.
2228 */
2229 case VMMR0_DO_NOP:
2230 case VMMR0_DO_SLOW_NOP:
2231 return VINF_SUCCESS;
2232
2233 /*
2234 * For testing Ring-0 APIs invoked in this environment.
2235 */
2236 case VMMR0_DO_TESTS:
2237 /** @todo make new test */
2238 return VINF_SUCCESS;
2239
2240 default:
2241 /*
2242 * We return VERR_NOT_SUPPORTED here so that the caller gets something
2243 * other than -1, which the interrupt gate glue code might return.
2244 */
2245 Log(("operation %#x is not supported\n", enmOperation));
2246 return VERR_NOT_SUPPORTED;
2247 }
2248 return rc;
2249}
2250
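/*
 * Illustrative sketch, not part of the original source: a ring-3 caller
 * normally reaches the dispatcher above through the support library. For
 * the VMMR0_DO_GVMM_CREATE_VM case the request is set up roughly like this
 * (names follow VBox/sup.h and VBox/vmm/gvmm.h; treat the details as an
 * approximation of the real call site rather than canonical code):
 *
 *     GVMMCREATEVMREQ CreateVMReq;
 *     CreateVMReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *     CreateVMReq.Hdr.cbReq    = sizeof(CreateVMReq);
 *     CreateVMReq.pSession     = pSession;      // the caller's SUPDRV session
 *     CreateVMReq.cCpus        = 1;             // number of virtual CPUs
 *     CreateVMReq.pVMR0        = NIL_RTR0PTR;   // out: ring-0 VM address
 *     CreateVMReq.pVMR3        = NULL;          // out: ring-3 VM mapping
 *     int rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GVMM_CREATE_VM,
 *                               0, &CreateVMReq.Hdr);
 *
 * The zero is u64Arg; on success the call lands in the VMMR0_DO_GVMM_CREATE_VM
 * case above with pGVM == NULL, u64Arg == 0 and idCpu == NIL_VMCPUID.
 */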
2251
2252/**
2253 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2254 */
2255typedef struct VMMR0ENTRYEXARGS
2256{
2257 PGVM pGVM; /**< The global (ring-0) VM structure. */
2258 VMCPUID idCpu; /**< The virtual CPU ID. */
2259 VMMR0OPERATION enmOperation; /**< The operation to perform. */
2260 PSUPVMMR0REQHDR pReq; /**< Pointer to the SUPVMMR0REQHDR packet. Optional. */
2261 uint64_t u64Arg; /**< Simple constant argument. */
2262 PSUPDRVSESSION pSession; /**< The session of the caller. */
2263} VMMR0ENTRYEXARGS;
2264/** Pointer to a vmmR0EntryExWrapper argument package. */
2265typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
2266
2267/**
2268 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2269 *
2270 * @returns VBox status code.
2271 * @param pvArgs The argument package.
2272 */
2273static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2274{
2275 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2276 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2277 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2278 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2279 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2280 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2281}
2282
2283
2284/**
2285 * The Ring 0 entry point, called by the support library (SUP).
2286 *
2287 * @returns VBox status code.
2288 * @param pGVM The global (ring-0) VM structure.
2289 * @param pVM The cross context VM structure.
2290 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2291 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2292 * @param enmOperation Which operation to execute.
2293 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2294 * @param u64Arg Some simple constant argument.
2295 * @param pSession The session of the caller.
2296 * @remarks Assume called with interrupts _enabled_.
2297 */
2298VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2299 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2300{
2301 /*
2302 * Requests that should only happen on the EMT thread will be
2303 * wrapped in a setjmp so we can assert without causing trouble.
2304 */
2305 if ( pVM != NULL
2306 && pGVM != NULL
2307 && pVM == pGVM /** @todo drop pGVM */
2308 && idCpu < pGVM->cCpus
2309 && pGVM->pSession == pSession
2310 && pGVM->pSelf == pVM)
2311 {
2312 switch (enmOperation)
2313 {
2314 /* These might/will be called before VMMR3Init. */
2315 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2316 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2317 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2318 case VMMR0_DO_GMM_FREE_PAGES:
2319 case VMMR0_DO_GMM_BALLOONED_PAGES:
2320 /* On the Mac we might not have a valid jmp buf, so check these as well. */
2321 case VMMR0_DO_VMMR0_INIT:
2322 case VMMR0_DO_VMMR0_TERM:
2323
2324 case VMMR0_DO_PDM_DEVICE_CREATE:
2325 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2326 case VMMR0_DO_IOM_GROW_IO_PORTS:
2327 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2328 {
2329 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2330 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2331 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2332 && pGVCpu->hNativeThreadR0 == hNativeThread))
2333 {
2334 if (!pGVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2335 break;
2336
2337 /** @todo validate this EMT claim... GVM knows. */
2338 VMMR0ENTRYEXARGS Args;
2339 Args.pGVM = pGVM;
2340 Args.idCpu = idCpu;
2341 Args.enmOperation = enmOperation;
2342 Args.pReq = pReq;
2343 Args.u64Arg = u64Arg;
2344 Args.pSession = pSession;
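 /* Arm the long jump buffer for this EMT before entering the worker, so that
    assertions and ring-3 requests can long jump back to this frame. */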
2345 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2346 }
2347 return VERR_VM_THREAD_NOT_EMT;
2348 }
2349
2350 default:
2351 case VMMR0_DO_PGM_POOL_GROW:
2352 break;
2353 }
2354 }
2355 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2356}
2357
2358
2359/**
2360 * Checks whether we've armed the ring-0 long jump machinery.
2361 *
2362 * @returns @c true / @c false
2363 * @param pVCpu The cross context virtual CPU structure.
2364 * @thread EMT
2365 * @sa VMMIsLongJumpArmed
2366 */
2367VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2368{
2369#ifdef RT_ARCH_X86
2370 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2371 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2372#else
2373 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2374 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2375#endif
2376}
2377
2378
2379/**
2380 * Checks whether we've done a ring-3 long jump.
2381 *
2382 * @returns @c true / @c false
2383 * @param pVCpu The cross context virtual CPU structure.
2384 * @thread EMT
2385 */
2386VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2387{
2388 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2389}
2390
2391
2392/**
2393 * Internal R0 logger worker: Flush logger.
2394 *
2395 * @param pLogger The logger instance to flush.
2396 * @remark This function must be exported!
2397 */
2398VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2399{
2400#ifdef LOG_ENABLED
2401 /*
2402 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2403 * (This code is a bit paranoid.)
2404 */
2405 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_UOFFSETOF(VMMR0LOGGER, Logger));
2406 if ( !VALID_PTR(pR0Logger)
2407 || !VALID_PTR(pR0Logger + 1)
2408 || pLogger->u32Magic != RTLOGGER_MAGIC)
2409 {
2410# ifdef DEBUG
2411 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2412# endif
2413 return;
2414 }
2415 if (pR0Logger->fFlushingDisabled)
2416 return; /* quietly */
2417
2418 PVMCC pVM = pR0Logger->pVM;
2419 if ( !VALID_PTR(pVM)
2420 || pVM->pSelf != pVM)
2421 {
2422# ifdef DEBUG
2423 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pSelf=%p! pLogger=%p\n", pVM, pVM->pSelf, pLogger);
2424# endif
2425 return;
2426 }
2427
2428 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2429 if (pVCpu)
2430 {
2431 /*
2432 * Check that the jump buffer is armed.
2433 */
2434# ifdef RT_ARCH_X86
2435 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2436 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2437# else
2438 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2439 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2440# endif
2441 {
2442# ifdef DEBUG
2443 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2444# endif
2445 return;
2446 }
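 /* The jump buffer is armed, so hand the actual flushing off to ring-3 on this EMT. */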
2447 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2448 }
2449# ifdef DEBUG
2450 else
2451 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2452# endif
2453#else
2454 NOREF(pLogger);
2455#endif /* LOG_ENABLED */
2456}
2457
2458#ifdef LOG_ENABLED
2459
2460/**
2461 * Disables flushing of the ring-0 debug log.
2462 *
2463 * @param pVCpu The cross context virtual CPU structure.
2464 */
2465VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPUCC pVCpu)
2466{
2467 if (pVCpu->vmm.s.pR0LoggerR0)
2468 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2469 if (pVCpu->vmm.s.pR0RelLoggerR0)
2470 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = true;
2471}
2472
2473
2474/**
2475 * Enables flushing of the ring-0 debug log.
2476 *
2477 * @param pVCpu The cross context virtual CPU structure.
2478 */
2479VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPUCC pVCpu)
2480{
2481 if (pVCpu->vmm.s.pR0LoggerR0)
2482 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2483 if (pVCpu->vmm.s.pR0RelLoggerR0)
2484 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = false;
2485}
2486
2487
2488/**
2489 * Checks if log flushing is disabled or not.
2490 *
2491 * @param pVCpu The cross context virtual CPU structure.
2492 */
2493VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPUCC pVCpu)
2494{
2495 if (pVCpu->vmm.s.pR0LoggerR0)
2496 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2497 if (pVCpu->vmm.s.pR0RelLoggerR0)
2498 return pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled;
2499 return true;
2500}
2501
2502#endif /* LOG_ENABLED */
2503
2504/**
2505 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
2506 */
2507DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
2508{
2509 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
2510 if (pGVCpu)
2511 {
2512 PVMCPUCC pVCpu = pGVCpu;
2513 if (RT_VALID_PTR(pVCpu))
2514 {
2515 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR0;
2516 if (RT_VALID_PTR(pVmmLogger))
2517 {
2518 if ( pVmmLogger->fCreated
2519 && pVmmLogger->pVM == pGVCpu->pGVM)
2520 {
2521 if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
2522 return NULL;
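 /* fFlagsAndGroup packs the log group index in the high word and the required
    RTLOGGRPFLAGS_* mask in the low word; unless the group index is UINT16_MAX
    (no group filtering), only hand out this logger when that group has all the
    requested flags plus RTLOGGRPFLAGS_ENABLED set. */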
2523 uint16_t const fFlags = RT_LO_U16(fFlagsAndGroup);
2524 uint16_t const iGroup = RT_HI_U16(fFlagsAndGroup);
2525 if ( iGroup != UINT16_MAX
2526 && ( ( pVmmLogger->Logger.afGroups[iGroup < pVmmLogger->Logger.cGroups ? iGroup : 0]
2527 & (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED))
2528 != (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED)))
2529 return NULL;
2530 return &pVmmLogger->Logger;
2531 }
2532 }
2533 }
2534 }
2535 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
2536}
2537
2538
2539/**
2540 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2541 *
2542 * @returns true if the breakpoint should be hit, false if it should be ignored.
2543 */
2544DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2545{
2546#if 0
2547 return true;
2548#else
2549 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2550 if (pVM)
2551 {
2552 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2553
2554 if (pVCpu)
2555 {
2556#ifdef RT_ARCH_X86
2557 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2558 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2559#else
2560 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2561 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2562#endif
2563 {
2564 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2565 return RT_FAILURE_NP(rc);
2566 }
2567 }
2568 }
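 /* No EMT with an armed jump buffer to escape through; fall back to a
    compile-time default (hit the breakpoint on Linux, ignore it elsewhere). */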
2569#ifdef RT_OS_LINUX
2570 return true;
2571#else
2572 return false;
2573#endif
2574#endif
2575}
2576
2577
2578/**
2579 * Override this so we can push it up to ring-3.
2580 *
2581 * @param pszExpr Expression. Can be NULL.
2582 * @param uLine Location line number.
2583 * @param pszFile Location file name.
2584 * @param pszFunction Location function name.
2585 */
2586DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2587{
2588 /*
2589 * To the log.
2590 */
2591 LogAlways(("\n!!R0-Assertion Failed!!\n"
2592 "Expression: %s\n"
2593 "Location : %s(%d) %s\n",
2594 pszExpr, pszFile, uLine, pszFunction));
2595
2596 /*
2597 * To the global VMM buffer.
2598 */
2599 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2600 if (pVM)
2601 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2602 "\n!!R0-Assertion Failed!!\n"
2603 "Expression: %.*s\n"
2604 "Location : %s(%d) %s\n",
2605 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2606 pszFile, uLine, pszFunction);
2607
2608 /*
2609 * Continue the normal way.
2610 */
2611 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2612}
2613
2614
2615/**
2616 * Callback for RTLogFormatV which writes to the ring-3 log port.
2617 * See PFNLOGOUTPUT() for details.
2618 */
2619static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2620{
2621 for (size_t i = 0; i < cbChars; i++)
2622 {
2623 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2624 }
2625
2626 NOREF(pv);
2627 return cbChars;
2628}
2629
2630
2631/**
2632 * Override this so we can push it up to ring-3.
2633 *
2634 * @param pszFormat The format string.
2635 * @param va Arguments.
2636 */
2637DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2638{
2639 va_list vaCopy;
2640
2641 /*
2642 * Push the message to the loggers.
2643 */
2644 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2645 if (pLog)
2646 {
2647 va_copy(vaCopy, va);
2648 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2649 va_end(vaCopy);
2650 }
2651 pLog = RTLogRelGetDefaultInstance();
2652 if (pLog)
2653 {
2654 va_copy(vaCopy, va);
2655 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2656 va_end(vaCopy);
2657 }
2658
2659 /*
2660 * Push it to the global VMM buffer.
2661 */
2662 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2663 if (pVM)
2664 {
2665 va_copy(vaCopy, va);
2666 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2667 va_end(vaCopy);
2668 }
2669
2670 /*
2671 * Continue the normal way.
2672 */
2673 RTAssertMsg2V(pszFormat, va);
2674}
2675