VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@78382

Last change on this file since 78382 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 103.9 KB
1/* $Id: VMMR0.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#ifdef VBOX_WITH_NEM_R0
30# include <VBox/vmm/nem.h>
31#endif
32#include <VBox/vmm/em.h>
33#include <VBox/vmm/stam.h>
34#include <VBox/vmm/tm.h>
35#include "VMMInternal.h"
36#include <VBox/vmm/vm.h>
37#include <VBox/vmm/gvm.h>
38#ifdef VBOX_WITH_PCI_PASSTHROUGH
39# include <VBox/vmm/pdmpci.h>
40#endif
41#include <VBox/vmm/apic.h>
42
43#include <VBox/vmm/gvmm.h>
44#include <VBox/vmm/gmm.h>
45#include <VBox/vmm/gim.h>
46#include <VBox/intnet.h>
47#include <VBox/vmm/hm.h>
48#include <VBox/param.h>
49#include <VBox/err.h>
50#include <VBox/version.h>
51#include <VBox/log.h>
52
53#include <iprt/asm-amd64-x86.h>
54#include <iprt/assert.h>
55#include <iprt/crc.h>
56#include <iprt/mp.h>
57#include <iprt/once.h>
58#include <iprt/stdarg.h>
59#include <iprt/string.h>
60#include <iprt/thread.h>
61#include <iprt/timer.h>
62#include <iprt/time.h>
63
64#include "dtrace/VBoxVMM.h"
65
66
67#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
68# pragma intrinsic(_AddressOfReturnAddress)
69#endif
70
71#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
72# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
73#endif
74
75
76
77/*********************************************************************************************************************************
78* Defined Constants And Macros *
79*********************************************************************************************************************************/
80/** @def VMM_CHECK_SMAP_SETUP
81 * SMAP check setup. */
82/** @def VMM_CHECK_SMAP_CHECK
83 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
84 * it will be logged and @a a_BadExpr is executed. */
85/** @def VMM_CHECK_SMAP_CHECK2
86 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
88 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
88 * executed. */
89#if defined(VBOX_STRICT) || 1
90# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
91# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
92 do { \
93 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
94 { \
95 RTCCUINTREG fEflCheck = ASMGetFlags(); \
96 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
97 { /* likely */ } \
98 else \
99 { \
100 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
101 a_BadExpr; \
102 } \
103 } \
104 } while (0)
105# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
106 do { \
107 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
108 { \
109 RTCCUINTREG fEflCheck = ASMGetFlags(); \
110 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
111 { /* likely */ } \
112 else \
113 { \
114 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
115 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
116 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
117 a_BadExpr; \
118 } \
119 } \
120 } while (0)
121#else
122# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
123# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
124# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
125#endif
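/*
 * Usage sketch (illustrative; SomeRing0Call is a hypothetical callee): the
 * setup macro declares the fKernelFeatures local that both check macros read,
 * so a ring-0 entry point typically pairs them like this:
 *
 *     VMM_CHECK_SMAP_SETUP();
 *     VMM_CHECK_SMAP_CHECK(RT_NOTHING);                             // log only
 *     int rc = SomeRing0Call();
 *     VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_SMAP_BUT_AC_CLEAR);  // log, record in the VM, fail
 *
 * The a_BadExpr argument is whatever recovery expression should run when
 * EFLAGS.AC is found clear even though the kernel reports SMAP support.
 */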
126
127
128/*********************************************************************************************************************************
129* Internal Functions *
130*********************************************************************************************************************************/
131RT_C_DECLS_BEGIN
132#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
133extern uint64_t __udivdi3(uint64_t, uint64_t);
134extern uint64_t __umoddi3(uint64_t, uint64_t);
135#endif
136RT_C_DECLS_END
137
138
139/*********************************************************************************************************************************
140* Global Variables *
141*********************************************************************************************************************************/
142/** Drag in necessary library bits.
143 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
144PFNRT g_VMMR0Deps[] =
145{
146 (PFNRT)RTCrc32,
147 (PFNRT)RTOnce,
148#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
149 (PFNRT)__udivdi3,
150 (PFNRT)__umoddi3,
151#endif
152 NULL
153};
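/*
 * The table above is never called; taking the addresses is enough to stop the
 * linker from discarding these runtime symbols, which VBoxDD*R0.r0 expects to
 * resolve against this module when it is loaded.
 */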
154
155#ifdef RT_OS_SOLARIS
156/* Dependency information for the native solaris loader. */
157extern "C" { char _depends_on[] = "vboxdrv"; }
158#endif
159
160/** The result of SUPR0GetRawModeUsability(), set by ModuleInit(). */
161int g_rcRawModeUsability = VINF_SUCCESS;
162
163
164/**
165 * Initialize the module.
166 * This is called when we're first loaded.
167 *
168 * @returns 0 on success.
169 * @returns VBox status on failure.
170 * @param hMod Image handle for use in APIs.
171 */
172DECLEXPORT(int) ModuleInit(void *hMod)
173{
174 VMM_CHECK_SMAP_SETUP();
175 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
176
177#ifdef VBOX_WITH_DTRACE_R0
178 /*
179 * The first thing to do is register the static tracepoints.
180 * (Deregistration is automatic.)
181 */
182 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
183 if (RT_FAILURE(rc2))
184 return rc2;
185#endif
186 LogFlow(("ModuleInit:\n"));
187
188#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
189 /*
190 * Display the CMOS debug code.
191 */
192 ASMOutU8(0x72, 0x03);
193 uint8_t bDebugCode = ASMInU8(0x73);
194 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
195 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
196#endif
197
198 /*
199 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
200 */
201 int rc = vmmInitFormatTypes();
202 if (RT_SUCCESS(rc))
203 {
204 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
205 rc = GVMMR0Init();
206 if (RT_SUCCESS(rc))
207 {
208 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
209 rc = GMMR0Init();
210 if (RT_SUCCESS(rc))
211 {
212 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
213 rc = HMR0Init();
214 if (RT_SUCCESS(rc))
215 {
216 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
217 rc = PGMRegisterStringFormatTypes();
218 if (RT_SUCCESS(rc))
219 {
220 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
221#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
222 rc = PGMR0DynMapInit();
223#endif
224 if (RT_SUCCESS(rc))
225 {
226 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
227 rc = IntNetR0Init();
228 if (RT_SUCCESS(rc))
229 {
230#ifdef VBOX_WITH_PCI_PASSTHROUGH
231 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
232 rc = PciRawR0Init();
233#endif
234 if (RT_SUCCESS(rc))
235 {
236 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
237 rc = CPUMR0ModuleInit();
238 if (RT_SUCCESS(rc))
239 {
240#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
241 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
242 rc = vmmR0TripleFaultHackInit();
243 if (RT_SUCCESS(rc))
244#endif
245 {
246 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
247 if (RT_SUCCESS(rc))
248 {
249 g_rcRawModeUsability = SUPR0GetRawModeUsability();
250 if (g_rcRawModeUsability != VINF_SUCCESS)
251 SUPR0Printf("VMMR0!ModuleInit: SUPR0GetRawModeUsability -> %Rrc\n",
252 g_rcRawModeUsability);
253 LogFlow(("ModuleInit: returns success\n"));
254 return VINF_SUCCESS;
255 }
256 }
257
258 /*
259 * Bail out.
260 */
261#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
262 vmmR0TripleFaultHackTerm();
263#endif
264 }
265 else
266 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
267#ifdef VBOX_WITH_PCI_PASSTHROUGH
268 PciRawR0Term();
269#endif
270 }
271 else
272 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
273 IntNetR0Term();
274 }
275 else
276 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
277#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
278 PGMR0DynMapTerm();
279#endif
280 }
281 else
282 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
283 PGMDeregisterStringFormatTypes();
284 }
285 else
286 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
287 HMR0Term();
288 }
289 else
290 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
291 GMMR0Term();
292 }
293 else
294 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
295 GVMMR0Term();
296 }
297 else
298 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
299 vmmTermFormatTypes();
300 }
301 else
302 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
303
304 LogFlow(("ModuleInit: failed %Rrc\n", rc));
305 return rc;
306}
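/*
 * A note on the structure above: ModuleInit brings the ring-0 subsystems up in
 * a fixed order (format types, GVMM, GMM, HM, PGM string formats, dynamic
 * mapping, IntNet, PciRaw, CPUM, triple fault hack) and, when step N fails,
 * the nested else branches tear down steps N-1 .. 1 in reverse order before
 * returning the failure. ModuleTerm below performs the same teardown on the
 * success path when the module is unloaded.
 */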
307
308
309/**
310 * Terminate the module.
311 * This is called when we're finally unloaded.
312 *
313 * @param hMod Image handle for use in APIs.
314 */
315DECLEXPORT(void) ModuleTerm(void *hMod)
316{
317 NOREF(hMod);
318 LogFlow(("ModuleTerm:\n"));
319
320 /*
321 * Terminate the CPUM module (Local APIC cleanup).
322 */
323 CPUMR0ModuleTerm();
324
325 /*
326 * Terminate the internal network service.
327 */
328 IntNetR0Term();
329
330 /*
331 * PGM (Darwin), HM and PciRaw global cleanup.
332 */
333#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
334 PGMR0DynMapTerm();
335#endif
336#ifdef VBOX_WITH_PCI_PASSTHROUGH
337 PciRawR0Term();
338#endif
339 PGMDeregisterStringFormatTypes();
340 HMR0Term();
341#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
342 vmmR0TripleFaultHackTerm();
343#endif
344
345 /*
346 * Destroy the GMM and GVMM instances.
347 */
348 GMMR0Term();
349 GVMMR0Term();
350
351 vmmTermFormatTypes();
352
353 LogFlow(("ModuleTerm: returns\n"));
354}
355
356
357/**
358 * Initializes the R0 driver for a particular VM instance.
359 *
360 * @returns VBox status code.
361 *
362 * @param pGVM The global (ring-0) VM structure.
363 * @param pVM The cross context VM structure.
364 * @param uSvnRev The SVN revision of the ring-3 part.
365 * @param uBuildType Build type indicator.
366 * @thread EMT(0)
367 */
368static int vmmR0InitVM(PGVM pGVM, PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
369{
370 VMM_CHECK_SMAP_SETUP();
371 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
372
373 /*
374 * Match the SVN revisions and build type.
375 */
376 if (uSvnRev != VMMGetSvnRev())
377 {
378 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
379 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
380 return VERR_VMM_R0_VERSION_MISMATCH;
381 }
382 if (uBuildType != vmmGetBuildType())
383 {
384 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
385 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
386 return VERR_VMM_R0_VERSION_MISMATCH;
387 }
388
389 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0 /*idCpu*/);
390 if (RT_FAILURE(rc))
391 return rc;
392
393#ifdef LOG_ENABLED
394 /*
395 * Register the EMT R0 logger instance for VCPU 0.
396 */
397 PVMCPU pVCpu = &pVM->aCpus[0];
398
399 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
400 if (pR0Logger)
401 {
402# if 0 /* testing of the logger. */
403 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
404 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
405 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
406 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
407
408 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
409 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
410 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
411 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
412
413 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
414 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
415 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
416 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
417
418 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
419 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
420 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
421 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
422 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
423 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
424
425 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
426 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
427
428 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
429 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
430 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
431# endif
432 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
433 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
434 pR0Logger->fRegistered = true;
435 }
436#endif /* LOG_ENABLED */
437
438 /*
439 * Check if the host supports high resolution timers or not.
440 */
441 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
442 && !RTTimerCanDoHighResolution())
443 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
444
445 /*
446 * Initialize the per VM data for GVMM and GMM.
447 */
448 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
449 rc = GVMMR0InitVM(pGVM);
450// if (RT_SUCCESS(rc))
451// rc = GMMR0InitPerVMData(pVM);
452 if (RT_SUCCESS(rc))
453 {
454 /*
455 * Init HM, CPUM and PGM (Darwin only).
456 */
457 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
458 rc = HMR0InitVM(pVM);
459 if (RT_SUCCESS(rc))
460 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
461 if (RT_SUCCESS(rc))
462 {
463 rc = CPUMR0InitVM(pVM);
464 if (RT_SUCCESS(rc))
465 {
466 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
467#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
468 rc = PGMR0DynMapInitVM(pVM);
469#endif
470 if (RT_SUCCESS(rc))
471 {
472 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
473 rc = EMR0InitVM(pGVM, pVM);
474 if (RT_SUCCESS(rc))
475 {
476 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
477#ifdef VBOX_WITH_PCI_PASSTHROUGH
478 rc = PciRawR0InitVM(pGVM, pVM);
479#endif
480 if (RT_SUCCESS(rc))
481 {
482 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
483 rc = GIMR0InitVM(pVM);
484 if (RT_SUCCESS(rc))
485 {
486 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
487 if (RT_SUCCESS(rc))
488 {
489 GVMMR0DoneInitVM(pGVM);
490
491 /*
492 * Collect a bit of info for the VM release log.
493 */
494 pVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
495 pVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
496
497 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
498 return rc;
499 }
500
501 /* Bail out. */
502 GIMR0TermVM(pVM);
503 }
504#ifdef VBOX_WITH_PCI_PASSTHROUGH
505 PciRawR0TermVM(pGVM, pVM);
506#endif
507 }
508 }
509 }
510 }
511 HMR0TermVM(pVM);
512 }
513 }
514
515 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
516 return rc;
517}
518
519
520/**
521 * Does EMT specific VM initialization.
522 *
523 * @returns VBox status code.
524 * @param pGVM The ring-0 VM structure.
525 * @param pVM The cross context VM structure.
526 * @param idCpu The EMT that's calling.
527 */
528static int vmmR0InitVMEmt(PGVM pGVM, PVM pVM, VMCPUID idCpu)
529{
530 /* Paranoia (caller checked these already). */
531 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
532 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
533
534#ifdef LOG_ENABLED
535 /*
536 * Registration of ring 0 loggers.
537 */
538 PVMCPU pVCpu = &pVM->aCpus[idCpu];
539 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
540 if ( pR0Logger
541 && !pR0Logger->fRegistered)
542 {
543 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
544 pR0Logger->fRegistered = true;
545 }
546#endif
547 RT_NOREF(pVM);
548
549 return VINF_SUCCESS;
550}
551
552
553
554/**
555 * Terminates the R0 bits for a particular VM instance.
556 *
557 * This is normally called by ring-3 as part of the VM termination process, but
558 * may alternatively be called during the support driver session cleanup when
559 * the VM object is destroyed (see GVMM).
560 *
561 * @returns VBox status code.
562 *
563 * @param pGVM The global (ring-0) VM structure.
564 * @param pVM The cross context VM structure.
565 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
566 * thread.
567 * @thread EMT(0) or session clean up thread.
568 */
569VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, PVM pVM, VMCPUID idCpu)
570{
571 /*
572 * Check EMT(0) claim if we're called from userland.
573 */
574 if (idCpu != NIL_VMCPUID)
575 {
576 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
577 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
578 if (RT_FAILURE(rc))
579 return rc;
580 }
581
582#ifdef VBOX_WITH_PCI_PASSTHROUGH
583 PciRawR0TermVM(pGVM, pVM);
584#endif
585
586 /*
587 * Tell GVMM what we're up to and check that we only do this once.
588 */
589 if (GVMMR0DoingTermVM(pGVM))
590 {
591 GIMR0TermVM(pVM);
592
593 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
594 * here to make sure we don't leak any shared pages if we crash... */
595#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
596 PGMR0DynMapTermVM(pVM);
597#endif
598 HMR0TermVM(pVM);
599 }
600
601 /*
602 * Deregister the logger.
603 */
604 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
605 return VINF_SUCCESS;
606}
607
608
609/**
610 * An interrupt or unhalt force flag is set, deal with it.
611 *
612 * @returns VINF_SUCCESS (or VINF_EM_HALT).
613 * @param pVCpu The cross context virtual CPU structure.
614 * @param uMWait Result from EMMonitorWaitIsActive().
615 * @param enmInterruptibility Guest CPU interruptibility level.
616 */
617static int vmmR0DoHaltInterrupt(PVMCPU pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
618{
619 Assert(!TRPMHasTrap(pVCpu));
620 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
621 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
622
623 /*
624 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
625 */
626 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
627 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
628 {
629 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
630 {
631 uint8_t u8Interrupt = 0;
632 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
633 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
634 if (RT_SUCCESS(rc))
635 {
636 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
637
638 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
639 AssertRCSuccess(rc);
640 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
641 return rc;
642 }
643 }
644 }
645 /*
646 * SMI is not implemented yet, at least not here.
647 */
648 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
649 {
650 return VINF_EM_HALT;
651 }
652 /*
653 * NMI.
654 */
655 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
656 {
657 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
658 {
659 /** @todo later. */
660 return VINF_EM_HALT;
661 }
662 }
663 /*
664 * Nested-guest virtual interrupt.
665 */
666 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
667 {
668 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
669 {
670 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
671 * here before injecting the virtual interrupt. See emR3ForcedActions
672 * for details. */
673 return VINF_EM_HALT;
674 }
675 }
676
677 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
678 {
679 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
680 return VINF_SUCCESS;
681 }
682 if (uMWait > 1)
683 {
684 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
685 return VINF_SUCCESS;
686 }
687
688 return VINF_EM_HALT;
689}
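/*
 * In short: the function above lets execution resume (VINF_SUCCESS) when a
 * plain PIC/APIC interrupt can be injected via TRPMAssertTrap, when
 * VMCPU_FF_UNHALT is pending, or when an armed monitor is active (uMWait > 1);
 * pending SMIs, NMIs and nested-guest virtual interrupts are for now left to
 * ring-3, typically yielding VINF_EM_HALT.
 */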
690
691
692/**
693 * This does one round of vmR3HaltGlobal1Halt().
694 *
695 * The rationale here is that we'll reduce latency in interrupt situations if we
696 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
697 * MWAIT), but do one round of blocking here instead and hope the interrupt is
698 * raised in the meanwhile.
699 *
700 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
701 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
702 * ring-0 call (unless we're too close to a timer event). When the interrupt
703 * wakes us up, we'll return from ring-0 and EM will by instinct do a
704 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
705 * back to VMMR0EntryFast().
706 *
707 * @returns VINF_SUCCESS or VINF_EM_HALT.
708 * @param pGVM The ring-0 VM structure.
709 * @param pVM The cross context VM structure.
710 * @param pGVCpu The ring-0 virtual CPU structure.
711 * @param pVCpu The cross context virtual CPU structure.
712 *
713 * @todo r=bird: All the blocking/waiting and EMT management should move out of
714 * the VM module, probably to VMM. Then this would be more weird wrt
715 * parameters and statistics.
716 */
717static int vmmR0DoHalt(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, PVMCPU pVCpu)
718{
719 Assert(pVCpu == pGVCpu->pVCpu);
720
721 /*
722 * Do spin stat historization.
723 */
724 if (++pVCpu->vmm.s.cR0Halts & 0xff)
725 { /* likely */ }
726 else if (pVCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3)
727 {
728 pVCpu->vmm.s.cR0HaltsSucceeded = 2;
729 pVCpu->vmm.s.cR0HaltsToRing3 = 0;
730 }
731 else
732 {
733 pVCpu->vmm.s.cR0HaltsSucceeded = 0;
734 pVCpu->vmm.s.cR0HaltsToRing3 = 2;
735 }
736
737 /*
738 * Flags that make us go to ring-3.
739 */
740 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
741 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
742 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
743 | VM_FF_PGM_NO_MEMORY | VM_FF_REM_HANDLER_NOTIFY | VM_FF_DEBUG_SUSPEND;
744 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
745 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
746 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
747 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM
748#ifdef VBOX_WITH_RAW_MODE
749 | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT
750 | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_CSAM_SCAN_PAGE | VMCPU_FF_CSAM_PENDING_ACTION
751 | VMCPU_FF_CPUM
752#endif
753 ;
754
755 /*
756 * Check preconditions.
757 */
758 unsigned const uMWait = EMMonitorWaitIsActive(pVCpu);
759 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pVCpu);
760 if ( pVCpu->vmm.s.fMayHaltInRing0
761 && !TRPMHasTrap(pVCpu)
762 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
763 || uMWait > 1))
764 {
765 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs)
766 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
767 {
768 /*
769 * Interrupts pending already?
770 */
771 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
772 APICUpdatePendingInterrupts(pVCpu);
773
774 /*
775 * Flags that wake up from the halted state.
776 */
777 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
778 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
779
780 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
781 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
782 ASMNopPause();
783
784 /*
785 * Check out how long till the next timer event.
786 */
787 uint64_t u64Delta;
788 uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
789
790 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs)
791 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
792 {
793 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
794 APICUpdatePendingInterrupts(pVCpu);
795
796 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
797 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
798
799 /*
800 * Wait if there is enough time to the next timer event.
801 */
802 if (u64Delta >= pVCpu->vmm.s.cNsSpinBlockThreshold)
803 {
804 /* If there are few other CPU cores around, we will procrastinate a
805 little before going to sleep, hoping for some device raising an
806 interrupt or similar. Though, the best thing here would be to
807 dynamically adjust the spin count according to its usefulness or
808 something... */
809 if ( pVCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3
810 && RTMpGetOnlineCount() >= 4)
811 {
812 /** @todo Figure out how we can skip this if it hasn't helped recently...
813 * @bugref{9172#c12} */
814 uint32_t cSpinLoops = 42;
815 while (cSpinLoops-- > 0)
816 {
817 ASMNopPause();
818 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
819 APICUpdatePendingInterrupts(pVCpu);
820 ASMNopPause();
821 if (VM_FF_IS_ANY_SET(pVM, fVmFFs))
822 {
823 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3FromSpin);
824 return VINF_EM_HALT;
825 }
826 ASMNopPause();
827 if (VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
828 {
829 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3FromSpin);
830 return VINF_EM_HALT;
831 }
832 ASMNopPause();
833 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
834 {
835 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExecFromSpin);
836 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
837 }
838 ASMNopPause();
839 }
840 }
841
842 /* Block. We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
843 knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here). */
844 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);
845 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
846 int rc = GVMMR0SchedHalt(pGVM, pVM, pGVCpu, u64GipTime);
847 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
848 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
849 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
850 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
851 if ( rc == VINF_SUCCESS
852 || rc == VERR_INTERRUPTED)
853
854 {
855 /* Keep some stats like ring-3 does. */
856 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
857 if (cNsOverslept > 50000)
858 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
859 else if (cNsOverslept < -50000)
860 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
861 else
862 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
863
864 /*
865 * Recheck whether we can resume execution or have to go to ring-3.
866 */
867 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs)
868 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
869 {
870 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
871 APICUpdatePendingInterrupts(pVCpu);
872 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
873 {
874 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExecFromBlock);
875 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
876 }
877 }
878 }
879 }
880 }
881 }
882 }
883 return VINF_EM_HALT;
884}
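/*
 * The cR0HaltsSucceeded / cR0HaltsToRing3 counters maintained above work as a
 * crude 256-halt window: every 256 halts they are reset with a small bias
 * towards whichever outcome dominated, and the optional spin loop (the
 * RTMpGetOnlineCount() >= 4 path) is only attempted while ring-0 successes
 * outnumber trips to ring-3.
 */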
885
886
887/**
888 * VMM ring-0 thread-context callback.
889 *
890 * This does common HM state updating and calls the HM-specific thread-context
891 * callback.
892 *
893 * @param enmEvent The thread-context event.
894 * @param pvUser Opaque pointer to the VMCPU.
895 *
896 * @thread EMT(pvUser)
897 */
898static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
899{
900 PVMCPU pVCpu = (PVMCPU)pvUser;
901
902 switch (enmEvent)
903 {
904 case RTTHREADCTXEVENT_IN:
905 {
906 /*
907 * Linux may call us with preemption enabled (really!) but technically we
908 * cannot get preempted here, otherwise we end up in an infinite recursion
909 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
910 * ad infinitum). Let's just disable preemption for now...
911 */
912 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
913 * preemption after doing the callout (one or two functions up the
914 * call chain). */
915 /** @todo r=ramshankar: See @bugref{5313#c30}. */
916 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
917 RTThreadPreemptDisable(&ParanoidPreemptState);
918
919 /* We need to update the VCPU <-> host CPU mapping. */
920 RTCPUID idHostCpu;
921 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
922 pVCpu->iHostCpuSet = iHostCpuSet;
923 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
924
925 /* In the very unlikely event that the GIP delta for the CPU we're
926 rescheduled onto needs calculating, try to force a return to ring-3.
927 We unfortunately cannot do the measurements right here. */
928 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
929 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
930
931 /* Invoke the HM-specific thread-context callback. */
932 HMR0ThreadCtxCallback(enmEvent, pvUser);
933
934 /* Restore preemption. */
935 RTThreadPreemptRestore(&ParanoidPreemptState);
936 break;
937 }
938
939 case RTTHREADCTXEVENT_OUT:
940 {
941 /* Invoke the HM-specific thread-context callback. */
942 HMR0ThreadCtxCallback(enmEvent, pvUser);
943
944 /*
945 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
946 * have the same host CPU associated with it.
947 */
948 pVCpu->iHostCpuSet = UINT32_MAX;
949 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
950 break;
951 }
952
953 default:
954 /* Invoke the HM-specific thread-context callback. */
955 HMR0ThreadCtxCallback(enmEvent, pvUser);
956 break;
957 }
958}
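/*
 * Summarizing the callback above: on RTTHREADCTXEVENT_IN we re-establish the
 * VCPU <-> host CPU mapping (forcing a ring-3 trip if the new CPU still needs
 * a TSC delta measurement) before handing over to HMR0ThreadCtxCallback; on
 * RTTHREADCTXEVENT_OUT we hand over first and then invalidate the mapping so
 * no two VCPUs ever appear to share a host CPU.
 */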
959
960
961/**
962 * Creates thread switching hook for the current EMT thread.
963 *
964 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
965 * platform does not implement switcher hooks, no hooks will be created and the
966 * member set to NIL_RTTHREADCTXHOOK.
967 *
968 * @returns VBox status code.
969 * @param pVCpu The cross context virtual CPU structure.
970 * @thread EMT(pVCpu)
971 */
972VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
973{
974 VMCPU_ASSERT_EMT(pVCpu);
975 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
976
977#if 1 /* To disable this stuff change to zero. */
978 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
979 if (RT_SUCCESS(rc))
980 return rc;
981#else
982 RT_NOREF(vmmR0ThreadCtxCallback);
983 int rc = VERR_NOT_SUPPORTED;
984#endif
985
986 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
987 if (rc == VERR_NOT_SUPPORTED)
988 return VINF_SUCCESS;
989
990 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
991 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
992}
993
994
995/**
996 * Destroys the thread switching hook for the specified VCPU.
997 *
998 * @param pVCpu The cross context virtual CPU structure.
999 * @remarks Can be called from any thread.
1000 */
1001VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
1002{
1003 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
1004 AssertRC(rc);
1005 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1006}
1007
1008
1009/**
1010 * Disables the thread switching hook for this VCPU (if we got one).
1011 *
1012 * @param pVCpu The cross context virtual CPU structure.
1013 * @thread EMT(pVCpu)
1014 *
1015 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
1016 * this call. This means you have to be careful with what you do!
1017 */
1018VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
1019{
1020 /*
1021 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1022 * @bugref{7726#c19} explains the need for this trick:
1023 *
1024 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
1025 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
1026 * longjmp & normal return to ring-3, which opens a window where we may be
1027 * rescheduled without changing VMCPU::idHostCpu and cause confusion if
1028 * the CPU starts executing a different EMT. Both functions first disable
1029 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1030 * an opening for getting preempted.
1031 */
1032 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1033 * all the time. */
1034 /** @todo move this into the context hook disabling if(). */
1035 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1036
1037 /*
1038 * Disable the context hook, if we got one.
1039 */
1040 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1041 {
1042 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1043 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1044 AssertRC(rc);
1045 }
1046}
1047
1048
1049/**
1050 * Internal version of VMMR0ThreadCtxHookIsEnabled.
1051 *
1052 * @returns true if registered, false otherwise.
1053 * @param pVCpu The cross context virtual CPU structure.
1054 */
1055DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
1056{
1057 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
1058}
1059
1060
1061/**
1062 * Whether thread-context hooks are registered for this VCPU.
1063 *
1064 * @returns true if registered, false otherwise.
1065 * @param pVCpu The cross context virtual CPU structure.
1066 */
1067VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
1068{
1069 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1070}
1071
1072
1073#ifdef VBOX_WITH_STATISTICS
1074/**
1075 * Record return code statistics
1076 * @param pVM The cross context VM structure.
1077 * @param pVCpu The cross context virtual CPU structure.
1078 * @param rc The status code.
1079 */
1080static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
1081{
1082 /*
1083 * Collect statistics.
1084 */
1085 switch (rc)
1086 {
1087 case VINF_SUCCESS:
1088 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1089 break;
1090 case VINF_EM_RAW_INTERRUPT:
1091 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1092 break;
1093 case VINF_EM_RAW_INTERRUPT_HYPER:
1094 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1095 break;
1096 case VINF_EM_RAW_GUEST_TRAP:
1097 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1098 break;
1099 case VINF_EM_RAW_RING_SWITCH:
1100 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1101 break;
1102 case VINF_EM_RAW_RING_SWITCH_INT:
1103 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1104 break;
1105 case VINF_EM_RAW_STALE_SELECTOR:
1106 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1107 break;
1108 case VINF_EM_RAW_IRET_TRAP:
1109 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1110 break;
1111 case VINF_IOM_R3_IOPORT_READ:
1112 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1113 break;
1114 case VINF_IOM_R3_IOPORT_WRITE:
1115 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1116 break;
1117 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1118 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1119 break;
1120 case VINF_IOM_R3_MMIO_READ:
1121 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1122 break;
1123 case VINF_IOM_R3_MMIO_WRITE:
1124 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1125 break;
1126 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1127 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1128 break;
1129 case VINF_IOM_R3_MMIO_READ_WRITE:
1130 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1131 break;
1132 case VINF_PATM_HC_MMIO_PATCH_READ:
1133 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1134 break;
1135 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1136 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1137 break;
1138 case VINF_CPUM_R3_MSR_READ:
1139 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1140 break;
1141 case VINF_CPUM_R3_MSR_WRITE:
1142 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1143 break;
1144 case VINF_EM_RAW_EMULATE_INSTR:
1145 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1146 break;
1147 case VINF_PATCH_EMULATE_INSTR:
1148 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1149 break;
1150 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1151 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1152 break;
1153 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1154 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1155 break;
1156 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1157 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1158 break;
1159 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1160 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1161 break;
1162 case VINF_CSAM_PENDING_ACTION:
1163 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1164 break;
1165 case VINF_PGM_SYNC_CR3:
1166 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1167 break;
1168 case VINF_PATM_PATCH_INT3:
1169 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1170 break;
1171 case VINF_PATM_PATCH_TRAP_PF:
1172 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1173 break;
1174 case VINF_PATM_PATCH_TRAP_GP:
1175 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1176 break;
1177 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1178 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1179 break;
1180 case VINF_EM_RESCHEDULE_REM:
1181 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1182 break;
1183 case VINF_EM_RAW_TO_R3:
1184 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1185 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1186 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1187 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1188 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1189 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1190 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1191 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1192 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1193 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1194 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1195 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1196 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1197 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1198 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1199 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1200 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1201 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1202 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1203 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1204 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1205 else
1206 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1207 break;
1208
1209 case VINF_EM_RAW_TIMER_PENDING:
1210 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1211 break;
1212 case VINF_EM_RAW_INTERRUPT_PENDING:
1213 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1214 break;
1215 case VINF_VMM_CALL_HOST:
1216 switch (pVCpu->vmm.s.enmCallRing3Operation)
1217 {
1218 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1219 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1220 break;
1221 case VMMCALLRING3_PDM_LOCK:
1222 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1223 break;
1224 case VMMCALLRING3_PGM_POOL_GROW:
1225 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1226 break;
1227 case VMMCALLRING3_PGM_LOCK:
1228 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1229 break;
1230 case VMMCALLRING3_PGM_MAP_CHUNK:
1231 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1232 break;
1233 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1234 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1235 break;
1236 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
1237 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
1238 break;
1239 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1240 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1241 break;
1242 case VMMCALLRING3_VM_SET_ERROR:
1243 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1244 break;
1245 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1246 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1247 break;
1248 case VMMCALLRING3_VM_R0_ASSERTION:
1249 default:
1250 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1251 break;
1252 }
1253 break;
1254 case VINF_PATM_DUPLICATE_FUNCTION:
1255 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1256 break;
1257 case VINF_PGM_CHANGE_MODE:
1258 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1259 break;
1260 case VINF_PGM_POOL_FLUSH_PENDING:
1261 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1262 break;
1263 case VINF_EM_PENDING_REQUEST:
1264 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1265 break;
1266 case VINF_EM_HM_PATCH_TPR_INSTR:
1267 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1268 break;
1269 default:
1270 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1271 break;
1272 }
1273}
1274#endif /* VBOX_WITH_STATISTICS */
1275
1276
1277/**
1278 * The Ring 0 entry point, called by the fast-ioctl path.
1279 *
1280 * @param pGVM The global (ring-0) VM structure.
1281 * @param pVM The cross context VM structure.
1282 * The return code is stored in pVM->vmm.s.iLastGZRc.
1283 * @param idCpu The Virtual CPU ID of the calling EMT.
1284 * @param enmOperation Which operation to execute.
1285 * @remarks Assume called with interrupts _enabled_.
1286 */
1287VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1288{
1289 /*
1290 * Validation.
1291 */
1292 if ( idCpu < pGVM->cCpus
1293 && pGVM->cCpus == pVM->cCpus)
1294 { /*likely*/ }
1295 else
1296 {
1297 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x/%#x\n", idCpu, pGVM->cCpus, pVM->cCpus);
1298 return;
1299 }
1300
1301 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1302 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1303 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1304 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1305 && pVCpu->hNativeThreadR0 == hNativeThread))
1306 { /* likely */ }
1307 else
1308 {
1309 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pVCpu->hNativeThreadR0=%p\n",
1310 idCpu, hNativeThread, pGVCpu->hEMT, pVCpu->hNativeThreadR0);
1311 return;
1312 }
1313
1314 /*
1315 * SMAP fun.
1316 */
1317 VMM_CHECK_SMAP_SETUP();
1318 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1319
1320 /*
1321 * Perform requested operation.
1322 */
1323 switch (enmOperation)
1324 {
1325 /*
1326 * Switch to GC and run guest raw mode code.
1327 * Disable interrupts before doing the world switch.
1328 */
1329 case VMMR0_DO_RAW_RUN:
1330 {
1331#ifdef VBOX_WITH_RAW_MODE
1332# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1333 /* Some safety precautions first. */
1334 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1335 {
1336 pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
1337 break;
1338 }
1339# endif
1340 if (RT_SUCCESS(g_rcRawModeUsability))
1341 { /* likely */ }
1342 else
1343 {
1344 pVCpu->vmm.s.iLastGZRc = g_rcRawModeUsability;
1345 break;
1346 }
1347
1348 /*
1349 * Disable preemption.
1350 */
1351 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1352 RTThreadPreemptDisable(&PreemptState);
1353
1354 /*
1355 * Get the host CPU identifiers, make sure they are valid and that
1356 * we've got a TSC delta for the CPU.
1357 */
1358 RTCPUID idHostCpu;
1359 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1360 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1361 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1362 {
1363 /*
1364 * Commit the CPU identifiers and update the periodic preemption timer if it's active.
1365 */
1366# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1367 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1368# endif
1369 pVCpu->iHostCpuSet = iHostCpuSet;
1370 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1371
1372 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1373 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1374
1375 /*
1376 * We might need to disable VT-x if the active switcher turns off paging.
1377 */
1378 bool fVTxDisabled;
1379 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1380 if (RT_SUCCESS(rc))
1381 {
1382 /*
1383 * Disable interrupts and run raw-mode code. The loop is for efficiently
1384 * dispatching tracepoints that fired in raw-mode context.
1385 */
1386 RTCCUINTREG uFlags = ASMIntDisableFlags();
1387
1388 for (;;)
1389 {
1390 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
1391 TMNotifyStartOfExecution(pVCpu);
1392
1393 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1394 pVCpu->vmm.s.iLastGZRc = rc;
1395
1396 TMNotifyEndOfExecution(pVCpu);
1397 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1398
1399 if (rc != VINF_VMM_CALL_TRACER)
1400 break;
1401 SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
1402 }
1403
1404 /*
1405 * Re-enable VT-x before we dispatch any pending host interrupts and
1406 * re-enable interrupts.
1407 */
1408 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1409
1410 if ( rc == VINF_EM_RAW_INTERRUPT
1411 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1412 TRPMR0DispatchHostInterrupt(pVM);
1413
1414 ASMSetFlags(uFlags);
1415
1416 /* Fire dtrace probe and collect statistics. */
1417 VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1418# ifdef VBOX_WITH_STATISTICS
1419 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1420 vmmR0RecordRC(pVM, pVCpu, rc);
1421# endif
1422 }
1423 else
1424 pVCpu->vmm.s.iLastGZRc = rc;
1425
1426 /*
1427 * Invalidate the host CPU identifiers as we restore preemption.
1428 */
1429 pVCpu->iHostCpuSet = UINT32_MAX;
1430 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1431
1432 RTThreadPreemptRestore(&PreemptState);
1433 }
1434 /*
1435 * Invalid CPU set index or TSC delta in need of measuring.
1436 */
1437 else
1438 {
1439 RTThreadPreemptRestore(&PreemptState);
1440 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1441 {
1442 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1443 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1444 0 /*default cTries*/);
1445 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1446 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1447 else
1448 pVCpu->vmm.s.iLastGZRc = rc;
1449 }
1450 else
1451 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1452 }
1453
1454#else /* !VBOX_WITH_RAW_MODE */
1455 pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_NOT_SUPPORTED;
1456#endif
1457 break;
1458 }
1459
1460 /*
1461 * Run guest code using the available hardware acceleration technology.
1462 */
1463 case VMMR0_DO_HM_RUN:
1464 {
1465 for (;;) /* hlt loop */
1466 {
1467 /*
1468 * Disable preemption.
1469 */
1470 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1471 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1472 RTThreadPreemptDisable(&PreemptState);
1473
1474 /*
1475 * Get the host CPU identifiers, make sure they are valid and that
1476 * we've got a TSC delta for the CPU.
1477 */
1478 RTCPUID idHostCpu;
1479 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1480 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1481 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1482 {
1483 pVCpu->iHostCpuSet = iHostCpuSet;
1484 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1485
1486 /*
1487 * Update the periodic preemption timer if it's active.
1488 */
1489 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1490 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1491 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1492
1493#ifdef VMM_R0_TOUCH_FPU
1494 /*
1495 * Make sure we've got the FPU state loaded so we don't need to clear
1496 * CR0.TS and get out of sync with the host kernel when loading the guest
1497 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1498 */
1499 CPUMR0TouchHostFpu();
1500#endif
1501 int rc;
1502 bool fPreemptRestored = false;
1503 if (!HMR0SuspendPending())
1504 {
1505 /*
1506 * Enable the context switching hook.
1507 */
1508 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1509 {
1510 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1511 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1512 }
1513
1514 /*
1515 * Enter HM context.
1516 */
1517 rc = HMR0Enter(pVCpu);
1518 if (RT_SUCCESS(rc))
1519 {
1520 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1521
1522 /*
1523 * When preemption hooks are in place, enable preemption now that
1524 * we're in HM context.
1525 */
1526 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1527 {
1528 fPreemptRestored = true;
1529 RTThreadPreemptRestore(&PreemptState);
1530 }
1531
1532 /*
1533 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1534 */
1535 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1536 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1537 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1538
1539 /*
1540 * Assert sanity on the way out. Using manual assertion code here as normal
1541 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1542 */
1543 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1544 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1545 {
1546 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1547 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1548 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1549 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1550 }
1551 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1552 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1553 {
1554 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1555 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1556 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1557 rc = VERR_INVALID_STATE;
1558 }
1559
1560 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1561 }
1562 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1563
1564 /*
1565 * Invalidate the host CPU identifiers before we disable the context
1566 * hook / restore preemption.
1567 */
1568 pVCpu->iHostCpuSet = UINT32_MAX;
1569 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1570
1571 /*
1572 * Disable context hooks. Due to unresolved cleanup issues, we
1573 * cannot leave the hooks enabled when we return to ring-3.
1574 *
1575 * Note! At the moment HM may also have disabled the hook
1576 * when we get here, but the IPRT API handles that.
1577 */
1578 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1579 {
1580 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1581 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1582 }
1583 }
1584 /*
1585 * The system is about to go into suspend mode; go back to ring 3.
1586 */
1587 else
1588 {
1589 rc = VINF_EM_RAW_INTERRUPT;
1590 pVCpu->iHostCpuSet = UINT32_MAX;
1591 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1592 }
1593
1594 /** @todo When HM stops messing with the context hook state, we'll disable
1595 * preemption again before the RTThreadCtxHookDisable call. */
1596 if (!fPreemptRestored)
1597 RTThreadPreemptRestore(&PreemptState);
1598
1599 pVCpu->vmm.s.iLastGZRc = rc;
1600
1601 /* Fire dtrace probe and collect statistics. */
1602 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1603#ifdef VBOX_WITH_STATISTICS
1604 vmmR0RecordRC(pVM, pVCpu, rc);
1605#endif
1606#if 1
1607 /*
1608 * If this is a halt, see if we can handle it here in ring-0.
1609 */
1610 if (rc != VINF_EM_HALT)
1611 { /* we're not in a hurry for a HLT, so prefer this path */ }
1612 else
1613 {
1614 pVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pVM, pGVCpu, pVCpu);
1615 if (rc == VINF_SUCCESS)
1616 {
1617 pVCpu->vmm.s.cR0HaltsSucceeded++;
1618 continue;
1619 }
1620 pVCpu->vmm.s.cR0HaltsToRing3++;
1621 }
1622#endif
1623 }
1624 /*
1625 * Invalid CPU set index or TSC delta in need of measuring.
1626 */
1627 else
1628 {
1629 pVCpu->iHostCpuSet = UINT32_MAX;
1630 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1631 RTThreadPreemptRestore(&PreemptState);
1632 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1633 {
1634 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1635 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1636 0 /*default cTries*/);
1637 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1638 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1639 else
1640 pVCpu->vmm.s.iLastGZRc = rc;
1641 }
1642 else
1643 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1644 }
1645 break;
1646
1647 } /* halt loop. */
1648 break;
1649 }
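/*
 * Note that the hlt loop above only goes around again when vmmR0DoHalt()
 * returns VINF_SUCCESS: the 'continue' re-enters the preemption-disable /
 * HMR0Enter / run sequence without leaving ring-0, which is the whole point
 * of handling HLT here rather than falling back to ring-3.
 */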
1650
1651#ifdef VBOX_WITH_NEM_R0
1652# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1653 case VMMR0_DO_NEM_RUN:
1654 {
1655 /*
1656 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1657 */
1658 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1659 int rc = vmmR0CallRing3SetJmp2(&pVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1660 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1661 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1662
1663 pVCpu->vmm.s.iLastGZRc = rc;
1664
1665 /*
1666 * Fire dtrace probe and collect statistics.
1667 */
1668 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1669# ifdef VBOX_WITH_STATISTICS
1670 vmmR0RecordRC(pVM, pVCpu, rc);
1671# endif
1672 break;
1673 }
1674# endif
1675#endif
1676
1677
1678 /*
1679 * For profiling.
1680 */
1681 case VMMR0_DO_NOP:
1682 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1683 break;
1684
1685 /*
1686 * Shouldn't happen.
1687 */
1688 default:
1689 AssertMsgFailed(("%#x\n", enmOperation));
1690 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1691 break;
1692 }
1693 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1694}
1695
1696
1697/**
1698 * Validates a session or VM session argument.
1699 *
1700 * @returns true / false accordingly.
1701 * @param pVM The cross context VM structure.
1702 * @param pClaimedSession The session claim to validate.
1703 * @param pSession The session argument.
1704 */
1705DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1706{
1707 /* This must be set! */
1708 if (!pSession)
1709 return false;
1710
1711 /* Only one out of the two. */
1712 if (pVM && pClaimedSession)
1713 return false;
1714 if (pVM)
1715 pClaimedSession = pVM->pSession;
1716 return pClaimedSession == pSession;
1717}
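/*
 * The combinations handled above, spelled out: a NULL pSession always fails;
 * supplying both pVM and pClaimedSession fails ("only one out of the two");
 * with just pVM the test becomes pVM->pSession == pSession; with just
 * pClaimedSession it is pClaimedSession == pSession; and with neither, the
 * comparison degenerates to NULL == pSession, which also fails.
 */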
1718
1719
1720/**
1721 * VMMR0EntryEx worker function, either called directly or whenever possible
1722 * called thru a longjmp so we can exit safely on failure.
1723 *
1724 * @returns VBox status code.
1725 * @param pGVM The global (ring-0) VM structure.
1726 * @param pVM The cross context VM structure.
1727 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1728 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1729 * @param enmOperation Which operation to execute.
1730 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1731 * The support driver validates this if it's present.
1732 * @param u64Arg Some simple constant argument.
1733 * @param pSession The session of the caller.
1734 *
1735 * @remarks Assume called with interrupts _enabled_.
1736 */
1737static int vmmR0EntryExWorker(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1738 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1739{
1740 /*
1741 * Validate pGVM, pVM and idCpu for consistency and validity.
1742 */
1743 if ( pGVM != NULL
1744 || pVM != NULL)
1745 {
1746 if (RT_LIKELY( RT_VALID_PTR(pGVM)
1747 && RT_VALID_PTR(pVM)
1748 && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0))
1749 { /* likely */ }
1750 else
1751 {
1752 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p and/or pVM=%p! (op=%d)\n", pGVM, pVM, enmOperation);
1753 return VERR_INVALID_POINTER;
1754 }
1755
1756 if (RT_LIKELY(pGVM->pVM == pVM))
1757 { /* likely */ }
1758 else
1759 {
1760 SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM->pVM=%p\n", pVM, pGVM->pVM);
1761 return VERR_INVALID_PARAMETER;
1762 }
1763
1764 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1765 { /* likely */ }
1766 else
1767 {
1768 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1769 return VERR_INVALID_PARAMETER;
1770 }
1771
1772 if (RT_LIKELY( pVM->enmVMState >= VMSTATE_CREATING
1773 && pVM->enmVMState <= VMSTATE_TERMINATED
1774 && pVM->cCpus == pGVM->cCpus
1775 && pVM->pSession == pSession
1776 && pVM->pVMR0 == pVM))
1777 { /* likely */ }
1778 else
1779 {
1780 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{.enmVMState=%d, .cCpus=%#x(==%#x), .pSession=%p(==%p), .pVMR0=%p(==%p)}! (op=%d)\n",
1781 pVM, pVM->enmVMState, pVM->cCpus, pGVM->cCpus, pVM->pSession, pSession, pVM->pVMR0, pVM, enmOperation);
1782 return VERR_INVALID_POINTER;
1783 }
1784 }
1785 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1786 { /* likely */ }
1787 else
1788 {
1789 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1790 return VERR_INVALID_PARAMETER;
1791 }
1792
1793 /*
1794 * SMAP fun.
1795 */
1796 VMM_CHECK_SMAP_SETUP();
1797 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1798
1799 /*
1800 * Process the request.
1801 */
1802 int rc;
1803 switch (enmOperation)
1804 {
1805 /*
1806 * GVM requests
1807 */
1808 case VMMR0_DO_GVMM_CREATE_VM:
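            /* The VM is only being created here, so no VM handles or CPU id may be passed in yet. */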
1809 if (pGVM == NULL && pVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1810 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1811 else
1812 rc = VERR_INVALID_PARAMETER;
1813 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1814 break;
1815
1816 case VMMR0_DO_GVMM_DESTROY_VM:
1817 if (pReqHdr == NULL && u64Arg == 0)
1818 rc = GVMMR0DestroyVM(pGVM, pVM);
1819 else
1820 rc = VERR_INVALID_PARAMETER;
1821 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1822 break;
1823
1824 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1825 if (pGVM != NULL && pVM != NULL)
1826 rc = GVMMR0RegisterVCpu(pGVM, pVM, idCpu);
1827 else
1828 rc = VERR_INVALID_PARAMETER;
1829 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1830 break;
1831
1832 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1833 if (pGVM != NULL && pVM != NULL)
1834 rc = GVMMR0DeregisterVCpu(pGVM, pVM, idCpu);
1835 else
1836 rc = VERR_INVALID_PARAMETER;
1837 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1838 break;
1839
1840 case VMMR0_DO_GVMM_SCHED_HALT:
1841 if (pReqHdr)
1842 return VERR_INVALID_PARAMETER;
1843 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1844 rc = GVMMR0SchedHaltReq(pGVM, pVM, idCpu, u64Arg);
1845 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1846 break;
1847
1848 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1849 if (pReqHdr || u64Arg)
1850 return VERR_INVALID_PARAMETER;
1851 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1852 rc = GVMMR0SchedWakeUp(pGVM, pVM, idCpu);
1853 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1854 break;
1855
1856 case VMMR0_DO_GVMM_SCHED_POKE:
1857 if (pReqHdr || u64Arg)
1858 return VERR_INVALID_PARAMETER;
1859 rc = GVMMR0SchedPoke(pGVM, pVM, idCpu);
1860 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1861 break;
1862
1863 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1864 if (u64Arg)
1865 return VERR_INVALID_PARAMETER;
1866 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1867 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1868 break;
1869
1870 case VMMR0_DO_GVMM_SCHED_POLL:
1871 if (pReqHdr || u64Arg > 1)
1872 return VERR_INVALID_PARAMETER;
1873 rc = GVMMR0SchedPoll(pGVM, pVM, idCpu, !!u64Arg);
1874 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1875 break;
1876
1877 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1878 if (u64Arg)
1879 return VERR_INVALID_PARAMETER;
1880 rc = GVMMR0QueryStatisticsReq(pGVM, pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1881 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1882 break;
1883
1884 case VMMR0_DO_GVMM_RESET_STATISTICS:
1885 if (u64Arg)
1886 return VERR_INVALID_PARAMETER;
1887 rc = GVMMR0ResetStatisticsReq(pGVM, pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1888 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1889 break;
1890
1891 /*
1892 * Initialize the R0 part of a VM instance.
1893 */
1894 case VMMR0_DO_VMMR0_INIT:
1895 rc = vmmR0InitVM(pGVM, pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1896 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1897 break;
1898
1899 /*
1900 * Does EMT-specific ring-0 init.
1901 */
1902 case VMMR0_DO_VMMR0_INIT_EMT:
1903 rc = vmmR0InitVMEmt(pGVM, pVM, idCpu);
1904 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1905 break;
1906
1907 /*
1908 * Terminate the R0 part of a VM instance.
1909 */
1910 case VMMR0_DO_VMMR0_TERM:
1911 rc = VMMR0TermVM(pGVM, pVM, 0 /*idCpu*/);
1912 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1913 break;
1914
1915 /*
1916 * Attempt to enable HM mode and check the current setting.
1917 */
1918 case VMMR0_DO_HM_ENABLE:
1919 rc = HMR0EnableAllCpus(pVM);
1920 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1921 break;
1922
1923 /*
1924 * Set up the hardware-accelerated session.
1925 */
1926 case VMMR0_DO_HM_SETUP_VM:
1927 rc = HMR0SetupVM(pVM);
1928 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1929 break;
1930
1931 /*
1932 * Switch to RC to execute Hypervisor function.
1933 */
1934 case VMMR0_DO_CALL_HYPERVISOR:
1935 {
1936#ifdef VBOX_WITH_RAW_MODE
1937 /*
1938 * Validate input / context.
1939 */
1940 if (RT_UNLIKELY(idCpu != 0))
1941 return VERR_INVALID_CPU_ID;
1942 if (RT_UNLIKELY(pVM->cCpus != 1))
1943 return VERR_INVALID_PARAMETER;
1944 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1945# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1946 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1947 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1948# endif
1949 if (RT_FAILURE(g_rcRawModeUsability))
1950 return g_rcRawModeUsability;
1951
1952 /*
1953 * Disable interrupts.
1954 */
1955 RTCCUINTREG fFlags = ASMIntDisableFlags();
1956
1957 /*
1958 * Get the host CPU identifiers, make sure they are valid and that
1959 * we've got a TSC delta for the CPU.
1960 */
1961 RTCPUID idHostCpu;
1962 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1963 if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
1964 {
1965 ASMSetFlags(fFlags);
1966 return VERR_INVALID_CPU_INDEX;
1967 }
1968 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1969 {
1970 ASMSetFlags(fFlags);
1971 rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1972 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1973 0 /*default cTries*/);
1974 if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
1975 {
1976 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1977 return rc;
1978 }
1979 }
1980
1981 /*
1982 * Commit the CPU identifiers.
1983 */
1984# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1985 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1986# endif
1987 pVCpu->iHostCpuSet = iHostCpuSet;
1988 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1989
1990 /*
1991 * We might need to disable VT-x if the active switcher turns off paging.
1992 */
1993 bool fVTxDisabled;
1994 rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1995 if (RT_SUCCESS(rc))
1996 {
1997 /*
1998 * Go through the wormhole...
1999 */
2000 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
2001
2002 /*
2003 * Re-enable VT-x before we dispatch any pending host interrupts.
2004 */
2005 HMR0LeaveSwitcher(pVM, fVTxDisabled);
2006
2007 if ( rc == VINF_EM_RAW_INTERRUPT
2008 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
2009 TRPMR0DispatchHostInterrupt(pVM);
2010 }
2011
2012 /*
2013 * Invalidate the host CPU identifiers as we restore interrupts.
2014 */
2015 pVCpu->iHostCpuSet = UINT32_MAX;
2016 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
2017 ASMSetFlags(fFlags);
2018
2019#else /* !VBOX_WITH_RAW_MODE */
2020 rc = VERR_RAW_MODE_NOT_SUPPORTED;
2021#endif
2022 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2023 break;
2024 }
2025
2026 /*
2027 * PGM wrappers.
2028 */
2029 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
2030 if (idCpu == NIL_VMCPUID)
2031 return VERR_INVALID_CPU_ID;
2032 rc = PGMR0PhysAllocateHandyPages(pGVM, pVM, idCpu);
2033 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2034 break;
2035
2036 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
2037 if (idCpu == NIL_VMCPUID)
2038 return VERR_INVALID_CPU_ID;
2039 rc = PGMR0PhysFlushHandyPages(pGVM, pVM, idCpu);
2040 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2041 break;
2042
2043 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
2044 if (idCpu == NIL_VMCPUID)
2045 return VERR_INVALID_CPU_ID;
2046 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, pVM, idCpu);
2047 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2048 break;
2049
2050 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
2051 if (idCpu != 0)
2052 return VERR_INVALID_CPU_ID;
2053 rc = PGMR0PhysSetupIoMmu(pGVM, pVM);
2054 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2055 break;
2056
2057 /*
2058 * GMM wrappers.
2059 */
2060 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2061 if (u64Arg)
2062 return VERR_INVALID_PARAMETER;
2063 rc = GMMR0InitialReservationReq(pGVM, pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
2064 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2065 break;
2066
2067 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2068 if (u64Arg)
2069 return VERR_INVALID_PARAMETER;
2070 rc = GMMR0UpdateReservationReq(pGVM, pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
2071 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2072 break;
2073
2074 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2075 if (u64Arg)
2076 return VERR_INVALID_PARAMETER;
2077 rc = GMMR0AllocatePagesReq(pGVM, pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
2078 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2079 break;
2080
2081 case VMMR0_DO_GMM_FREE_PAGES:
2082 if (u64Arg)
2083 return VERR_INVALID_PARAMETER;
2084 rc = GMMR0FreePagesReq(pGVM, pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
2085 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2086 break;
2087
2088 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
2089 if (u64Arg)
2090 return VERR_INVALID_PARAMETER;
2091 rc = GMMR0FreeLargePageReq(pGVM, pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
2092 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2093 break;
2094
2095 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
2096 if (u64Arg)
2097 return VERR_INVALID_PARAMETER;
2098 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
2099 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2100 break;
2101
2102 case VMMR0_DO_GMM_QUERY_MEM_STATS:
2103 if (idCpu == NIL_VMCPUID)
2104 return VERR_INVALID_CPU_ID;
2105 if (u64Arg)
2106 return VERR_INVALID_PARAMETER;
2107 rc = GMMR0QueryMemoryStatsReq(pGVM, pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
2108 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2109 break;
2110
2111 case VMMR0_DO_GMM_BALLOONED_PAGES:
2112 if (u64Arg)
2113 return VERR_INVALID_PARAMETER;
2114 rc = GMMR0BalloonedPagesReq(pGVM, pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
2115 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2116 break;
2117
2118 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
2119 if (u64Arg)
2120 return VERR_INVALID_PARAMETER;
2121 rc = GMMR0MapUnmapChunkReq(pGVM, pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
2122 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2123 break;
2124
2125 case VMMR0_DO_GMM_SEED_CHUNK:
2126 if (pReqHdr)
2127 return VERR_INVALID_PARAMETER;
2128 rc = GMMR0SeedChunk(pGVM, pVM, idCpu, (RTR3PTR)u64Arg);
2129 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2130 break;
2131
2132 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
2133 if (idCpu == NIL_VMCPUID)
2134 return VERR_INVALID_CPU_ID;
2135 if (u64Arg)
2136 return VERR_INVALID_PARAMETER;
2137 rc = GMMR0RegisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
2138 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2139 break;
2140
2141 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
2142 if (idCpu == NIL_VMCPUID)
2143 return VERR_INVALID_CPU_ID;
2144 if (u64Arg)
2145 return VERR_INVALID_PARAMETER;
2146 rc = GMMR0UnregisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
2147 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2148 break;
2149
2150 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
2151 if (idCpu == NIL_VMCPUID)
2152 return VERR_INVALID_CPU_ID;
2153 if ( u64Arg
2154 || pReqHdr)
2155 return VERR_INVALID_PARAMETER;
2156 rc = GMMR0ResetSharedModules(pGVM, pVM, idCpu);
2157 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2158 break;
2159
2160#ifdef VBOX_WITH_PAGE_SHARING
2161 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
2162 {
2163 if (idCpu == NIL_VMCPUID)
2164 return VERR_INVALID_CPU_ID;
2165 if ( u64Arg
2166 || pReqHdr)
2167 return VERR_INVALID_PARAMETER;
2168 rc = GMMR0CheckSharedModules(pGVM, pVM, idCpu);
2169 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2170 break;
2171 }
2172#endif
2173
2174#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
2175 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
2176 if (u64Arg)
2177 return VERR_INVALID_PARAMETER;
2178 rc = GMMR0FindDuplicatePageReq(pGVM, pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
2179 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2180 break;
2181#endif
2182
2183 case VMMR0_DO_GMM_QUERY_STATISTICS:
2184 if (u64Arg)
2185 return VERR_INVALID_PARAMETER;
2186 rc = GMMR0QueryStatisticsReq(pGVM, pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
2187 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2188 break;
2189
2190 case VMMR0_DO_GMM_RESET_STATISTICS:
2191 if (u64Arg)
2192 return VERR_INVALID_PARAMETER;
2193 rc = GMMR0ResetStatisticsReq(pGVM, pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
2194 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2195 break;
2196
2197 /*
2198 * A quick GCFGM mock-up.
2199 */
2200 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
2201 case VMMR0_DO_GCFGM_SET_VALUE:
2202 case VMMR0_DO_GCFGM_QUERY_VALUE:
2203 {
2204 if (pGVM || pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2205 return VERR_INVALID_PARAMETER;
2206 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
2207 if (pReq->Hdr.cbReq != sizeof(*pReq))
2208 return VERR_INVALID_PARAMETER;
2209 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
2210 {
2211 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2212 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2213 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2214 }
2215 else
2216 {
2217 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2218 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2219 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2220 }
2221 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2222 break;
2223 }
2224
2225 /*
2226 * PDM Wrappers.
2227 */
2228 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2229 {
2230 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2231 return VERR_INVALID_PARAMETER;
2232 rc = PDMR0DriverCallReqHandler(pGVM, pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2233 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2234 break;
2235 }
2236
2237 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
2238 {
2239 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2240 return VERR_INVALID_PARAMETER;
2241 rc = PDMR0DeviceCallReqHandler(pGVM, pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
2242 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2243 break;
2244 }
2245
2246 /*
2247 * Requests to the internal networking service.
2248 */
2249 case VMMR0_DO_INTNET_OPEN:
2250 {
2251 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2252 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2253 return VERR_INVALID_PARAMETER;
2254 rc = IntNetR0OpenReq(pSession, pReq);
2255 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2256 break;
2257 }
2258
2259 case VMMR0_DO_INTNET_IF_CLOSE:
2260 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2261 return VERR_INVALID_PARAMETER;
2262 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2263 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2264 break;
2265
2266
2267 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2268 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2269 return VERR_INVALID_PARAMETER;
2270 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2271 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2272 break;
2273
2274 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2275 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2276 return VERR_INVALID_PARAMETER;
2277 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2278 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2279 break;
2280
2281 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2282 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2283 return VERR_INVALID_PARAMETER;
2284 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2285 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2286 break;
2287
2288 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2289 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2290 return VERR_INVALID_PARAMETER;
2291 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2292 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2293 break;
2294
2295 case VMMR0_DO_INTNET_IF_SEND:
2296 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2297 return VERR_INVALID_PARAMETER;
2298 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2299 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2300 break;
2301
2302 case VMMR0_DO_INTNET_IF_WAIT:
2303 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2304 return VERR_INVALID_PARAMETER;
2305 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2306 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2307 break;
2308
2309 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2310 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2311 return VERR_INVALID_PARAMETER;
2312 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2313 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2314 break;
2315
2316#ifdef VBOX_WITH_PCI_PASSTHROUGH
2317 /*
2318 * Requests to host PCI driver service.
2319 */
2320 case VMMR0_DO_PCIRAW_REQ:
2321 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2322 return VERR_INVALID_PARAMETER;
2323 rc = PciRawR0ProcessReq(pGVM, pVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2324 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2325 break;
2326#endif
2327
2328 /*
2329 * NEM requests.
2330 */
2331#ifdef VBOX_WITH_NEM_R0
2332# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2333 case VMMR0_DO_NEM_INIT_VM:
2334 if (u64Arg || pReqHdr || idCpu != 0)
2335 return VERR_INVALID_PARAMETER;
2336 rc = NEMR0InitVM(pGVM, pVM);
2337 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2338 break;
2339
2340 case VMMR0_DO_NEM_INIT_VM_PART_2:
2341 if (u64Arg || pReqHdr || idCpu != 0)
2342 return VERR_INVALID_PARAMETER;
2343 rc = NEMR0InitVMPart2(pGVM, pVM);
2344 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2345 break;
2346
2347 case VMMR0_DO_NEM_MAP_PAGES:
2348 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2349 return VERR_INVALID_PARAMETER;
2350 rc = NEMR0MapPages(pGVM, pVM, idCpu);
2351 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2352 break;
2353
2354 case VMMR0_DO_NEM_UNMAP_PAGES:
2355 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2356 return VERR_INVALID_PARAMETER;
2357 rc = NEMR0UnmapPages(pGVM, pVM, idCpu);
2358 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2359 break;
2360
2361 case VMMR0_DO_NEM_EXPORT_STATE:
2362 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2363 return VERR_INVALID_PARAMETER;
2364 rc = NEMR0ExportState(pGVM, pVM, idCpu);
2365 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2366 break;
2367
2368 case VMMR0_DO_NEM_IMPORT_STATE:
2369 if (pReqHdr || idCpu == NIL_VMCPUID)
2370 return VERR_INVALID_PARAMETER;
2371 rc = NEMR0ImportState(pGVM, pVM, idCpu, u64Arg);
2372 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2373 break;
2374
2375 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2376 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2377 return VERR_INVALID_PARAMETER;
2378 rc = NEMR0QueryCpuTick(pGVM, pVM, idCpu);
2379 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2380 break;
2381
2382 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2383 if (pReqHdr || idCpu == NIL_VMCPUID)
2384 return VERR_INVALID_PARAMETER;
2385 rc = NEMR0ResumeCpuTickOnAll(pGVM, pVM, idCpu, u64Arg);
2386 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2387 break;
2388
2389 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2390 if (u64Arg || pReqHdr)
2391 return VERR_INVALID_PARAMETER;
2392 rc = NEMR0UpdateStatistics(pGVM, pVM, idCpu);
2393 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2394 break;
2395
2396# if 1 && defined(DEBUG_bird)
2397 case VMMR0_DO_NEM_EXPERIMENT:
2398 if (pReqHdr)
2399 return VERR_INVALID_PARAMETER;
2400 rc = NEMR0DoExperiment(pGVM, pVM, idCpu, u64Arg);
2401 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2402 break;
2403# endif
2404# endif
2405#endif
2406
2407 /*
2408 * For profiling.
2409 */
2410 case VMMR0_DO_NOP:
2411 case VMMR0_DO_SLOW_NOP:
2412 return VINF_SUCCESS;
2413
2414 /*
2415 * For testing Ring-0 APIs invoked in this environment.
2416 */
2417 case VMMR0_DO_TESTS:
2418 /** @todo make new test */
2419 return VINF_SUCCESS;
2420
2421
2422#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
2423 case VMMR0_DO_TEST_SWITCHER3264:
2424 if (idCpu == NIL_VMCPUID)
2425 return VERR_INVALID_CPU_ID;
2426 rc = HMR0TestSwitcher3264(pVM);
2427 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2428 break;
2429#endif
2430 default:
2431 /*
2432 * We're returning VERR_NOT_SUPPORTED here so we've got something other
2433 * than -1, which the interrupt gate glue code might return.
2434 */
2435 Log(("operation %#x is not supported\n", enmOperation));
2436 return VERR_NOT_SUPPORTED;
2437 }
2438 return rc;
2439}
2440
2441
2442/**
2443 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2444 */
2445typedef struct VMMR0ENTRYEXARGS
2446{
2447 PGVM pGVM;
2448 PVM pVM;
2449 VMCPUID idCpu;
2450 VMMR0OPERATION enmOperation;
2451 PSUPVMMR0REQHDR pReq;
2452 uint64_t u64Arg;
2453 PSUPDRVSESSION pSession;
2454} VMMR0ENTRYEXARGS;
2455/** Pointer to a vmmR0EntryExWrapper argument package. */
2456typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
2457
2458/**
2459 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2460 *
2461 * @returns VBox status code.
2462 * @param pvArgs The argument package
2463 */
2464static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2465{
2466 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2467 ((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
2468 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2469 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2470 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2471 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2472 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2473}
2474
2475
2476/**
2477 * The Ring 0 entry point, called by the support library (SUP).
2478 *
2479 * @returns VBox status code.
2480 * @param pGVM The global (ring-0) VM structure.
2481 * @param pVM The cross context VM structure.
2482 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2483 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2484 * @param enmOperation Which operation to execute.
2485 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2486 * @param u64Arg Some simple constant argument.
2487 * @param pSession The session of the caller.
2488 * @remarks Assume called with interrupts _enabled_.
2489 */
2490VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2491 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2492{
2493 /*
2494 * Requests that should only happen on the EMT thread will be
2495 * wrapped in a setjmp so we can assert without causing trouble.
2496 */
2497 if ( pVM != NULL
2498 && pGVM != NULL
2499 && idCpu < pGVM->cCpus
2500 && pVM->pVMR0 != NULL)
2501 {
2502 switch (enmOperation)
2503 {
2504 /* These might/will be called before VMMR3Init. */
2505 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2506 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2507 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2508 case VMMR0_DO_GMM_FREE_PAGES:
2509 case VMMR0_DO_GMM_BALLOONED_PAGES:
2510 /* On the Mac we might not have a valid jmp buf, so check these as well. */
2511 case VMMR0_DO_VMMR0_INIT:
2512 case VMMR0_DO_VMMR0_TERM:
2513 {
2514 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2515 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2516 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2517 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2518 && pVCpu->hNativeThreadR0 == hNativeThread))
2519 {
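                    /* No ring-0 jump buffer stack has been set up yet, so skip the setjmp and call the worker directly below. */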
2520 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2521 break;
2522
2523 /** @todo validate this EMT claim... GVM knows. */
2524 VMMR0ENTRYEXARGS Args;
2525 Args.pGVM = pGVM;
2526 Args.pVM = pVM;
2527 Args.idCpu = idCpu;
2528 Args.enmOperation = enmOperation;
2529 Args.pReq = pReq;
2530 Args.u64Arg = u64Arg;
2531 Args.pSession = pSession;
2532 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2533 }
2534 return VERR_VM_THREAD_NOT_EMT;
2535 }
2536
2537 default:
2538 break;
2539 }
2540 }
2541 return vmmR0EntryExWorker(pGVM, pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2542}
2543
2544
2545/**
2546 * Checks whether we've armed the ring-0 long jump machinery.
2547 *
2548 * @returns @c true / @c false
2549 * @param pVCpu The cross context virtual CPU structure.
2550 * @thread EMT
2551 * @sa VMMIsLongJumpArmed
2552 */
2553VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
2554{
2555#ifdef RT_ARCH_X86
2556 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2557 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2558#else
2559 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2560 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2561#endif
2562}
2563
2564
2565/**
2566 * Checks whether we've done a ring-3 long jump.
2567 *
2568 * @returns @c true / @c false
2569 * @param pVCpu The cross context virtual CPU structure.
2570 * @thread EMT
2571 */
2572VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
2573{
2574 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2575}
2576
2577
2578/**
2579 * Internal R0 logger worker: Flush logger.
2580 *
2581 * @param pLogger The logger instance to flush.
2582 * @remark This function must be exported!
2583 */
2584VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2585{
2586#ifdef LOG_ENABLED
2587 /*
2588 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2589 * (This code is a bit paranoid.)
2590 */
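    /* pLogger is embedded in a VMMR0LOGGER; recover the wrapper by subtracting the member offset. */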
2591 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_UOFFSETOF(VMMR0LOGGER, Logger));
2592 if ( !VALID_PTR(pR0Logger)
2593 || !VALID_PTR(pR0Logger + 1)
2594 || pLogger->u32Magic != RTLOGGER_MAGIC)
2595 {
2596# ifdef DEBUG
2597 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2598# endif
2599 return;
2600 }
2601 if (pR0Logger->fFlushingDisabled)
2602 return; /* quietly */
2603
2604 PVM pVM = pR0Logger->pVM;
2605 if ( !VALID_PTR(pVM)
2606 || pVM->pVMR0 != pVM)
2607 {
2608# ifdef DEBUG
2609 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
2610# endif
2611 return;
2612 }
2613
2614 PVMCPU pVCpu = VMMGetCpu(pVM);
2615 if (pVCpu)
2616 {
2617 /*
2618 * Check that the jump buffer is armed.
2619 */
2620# ifdef RT_ARCH_X86
2621 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2622 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2623# else
2624 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2625 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2626# endif
2627 {
2628# ifdef DEBUG
2629 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2630# endif
2631 return;
2632 }
2633 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2634 }
2635# ifdef DEBUG
2636 else
2637 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2638# endif
2639#else
2640 NOREF(pLogger);
2641#endif /* LOG_ENABLED */
2642}
2643
2644#ifdef LOG_ENABLED
2645
2646/**
2647 * Disables flushing of the ring-0 debug log.
2648 *
2649 * @param pVCpu The cross context virtual CPU structure.
2650 */
2651VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
2652{
2653 if (pVCpu->vmm.s.pR0LoggerR0)
2654 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2655 if (pVCpu->vmm.s.pR0RelLoggerR0)
2656 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = true;
2657}
2658
2659
2660/**
2661 * Enables flushing of the ring-0 debug log.
2662 *
2663 * @param pVCpu The cross context virtual CPU structure.
2664 */
2665VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
2666{
2667 if (pVCpu->vmm.s.pR0LoggerR0)
2668 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2669 if (pVCpu->vmm.s.pR0RelLoggerR0)
2670 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = false;
2671}
2672
2673
2674/**
2675 * Checks if log flushing is disabled or not.
2676 *
2677 * @param pVCpu The cross context virtual CPU structure.
2678 */
2679VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
2680{
2681 if (pVCpu->vmm.s.pR0LoggerR0)
2682 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2683 if (pVCpu->vmm.s.pR0RelLoggerR0)
2684 return pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled;
2685 return true;
2686}
2687
2688#endif /* LOG_ENABLED */
2689
2690/**
2691 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
2692 */
2693DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
2694{
2695 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
2696 if (pGVCpu)
2697 {
2698 PVMCPU pVCpu = pGVCpu->pVCpu;
2699 if (RT_VALID_PTR(pVCpu))
2700 {
2701 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR0;
2702 if (RT_VALID_PTR(pVmmLogger))
2703 {
2704 if ( pVmmLogger->fCreated
2705 && pVmmLogger->pVM == pGVCpu->pVM)
2706 {
2707 if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
2708 return NULL;
2709 uint16_t const fFlags = RT_LO_U16(fFlagsAndGroup);
2710 uint16_t const iGroup = RT_HI_U16(fFlagsAndGroup);
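                    /* Hand out the logger only if the requested group (when one is given) is enabled with all the requested flags. */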
2711 if ( iGroup != UINT16_MAX
2712 && ( ( pVmmLogger->Logger.afGroups[iGroup < pVmmLogger->Logger.cGroups ? iGroup : 0]
2713 & (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED))
2714 != (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED)))
2715 return NULL;
2716 return &pVmmLogger->Logger;
2717 }
2718 }
2719 }
2720 }
2721 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
2722}
2723
2724
2725/**
2726 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2727 *
2728 * @returns true if the breakpoint should be hit, false if it should be ignored.
2729 */
2730DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2731{
2732#if 0
2733 return true;
2734#else
2735 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2736 if (pVM)
2737 {
2738 PVMCPU pVCpu = VMMGetCpu(pVM);
2739
2740 if (pVCpu)
2741 {
2742#ifdef RT_ARCH_X86
2743 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2744 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2745#else
2746 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2747 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2748#endif
2749 {
2750 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2751 return RT_FAILURE_NP(rc);
2752 }
2753 }
2754 }
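    /* When we can't long jump to ring-3, hit the breakpoint on Linux hosts and ignore it elsewhere. */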
2755#ifdef RT_OS_LINUX
2756 return true;
2757#else
2758 return false;
2759#endif
2760#endif
2761}
2762
2763
2764/**
2765 * Override this so we can push it up to ring-3.
2766 *
2767 * @param pszExpr Expression. Can be NULL.
2768 * @param uLine Location line number.
2769 * @param pszFile Location file name.
2770 * @param pszFunction Location function name.
2771 */
2772DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2773{
2774 /*
2775 * To the log.
2776 */
2777 LogAlways(("\n!!R0-Assertion Failed!!\n"
2778 "Expression: %s\n"
2779 "Location : %s(%d) %s\n",
2780 pszExpr, pszFile, uLine, pszFunction));
2781
2782 /*
2783 * To the global VMM buffer.
2784 */
2785 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2786 if (pVM)
2787 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2788 "\n!!R0-Assertion Failed!!\n"
2789 "Expression: %.*s\n"
2790 "Location : %s(%d) %s\n",
2791 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2792 pszFile, uLine, pszFunction);
2793
2794 /*
2795 * Continue the normal way.
2796 */
2797 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2798}
2799
2800
2801/**
2802 * Callback for RTLogFormatV which writes to the ring-3 log port.
2803 * See PFNLOGOUTPUT() for details.
2804 */
2805static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2806{
2807 for (size_t i = 0; i < cbChars; i++)
2808 {
2809 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2810 }
2811
2812 NOREF(pv);
2813 return cbChars;
2814}
2815
2816
2817/**
2818 * Override this so we can push it up to ring-3.
2819 *
2820 * @param pszFormat The format string.
2821 * @param va Arguments.
2822 */
2823DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2824{
2825 va_list vaCopy;
2826
2827 /*
2828 * Push the message to the loggers.
2829 */
2830 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2831 if (pLog)
2832 {
2833 va_copy(vaCopy, va);
2834 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2835 va_end(vaCopy);
2836 }
2837 pLog = RTLogRelGetDefaultInstance();
2838 if (pLog)
2839 {
2840 va_copy(vaCopy, va);
2841 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2842 va_end(vaCopy);
2843 }
2844
2845 /*
2846 * Push it to the global VMM buffer.
2847 */
2848 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2849 if (pVM)
2850 {
2851 va_copy(vaCopy, va);
2852 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2853 va_end(vaCopy);
2854 }
2855
2856 /*
2857 * Continue the normal way.
2858 */
2859 RTAssertMsg2V(pszFormat, va);
2860}
2861