VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@75649

Last change on this file since 75649 was 75649, checked in by vboxsync, 6 years ago

VMM/VMMR0.cpp: comments adjustments.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 103.8 KB
1/* $Id: VMMR0.cpp 75649 2018-11-21 18:09:06Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#ifdef VBOX_WITH_NEM_R0
30# include <VBox/vmm/nem.h>
31#endif
32#include <VBox/vmm/em.h>
33#include <VBox/vmm/stam.h>
34#include <VBox/vmm/tm.h>
35#include "VMMInternal.h"
36#include <VBox/vmm/vm.h>
37#include <VBox/vmm/gvm.h>
38#ifdef VBOX_WITH_PCI_PASSTHROUGH
39# include <VBox/vmm/pdmpci.h>
40#endif
41#include <VBox/vmm/apic.h>
42
43#include <VBox/vmm/gvmm.h>
44#include <VBox/vmm/gmm.h>
45#include <VBox/vmm/gim.h>
46#include <VBox/intnet.h>
47#include <VBox/vmm/hm.h>
48#include <VBox/param.h>
49#include <VBox/err.h>
50#include <VBox/version.h>
51#include <VBox/log.h>
52
53#include <iprt/asm-amd64-x86.h>
54#include <iprt/assert.h>
55#include <iprt/crc.h>
56#include <iprt/mp.h>
57#include <iprt/once.h>
58#include <iprt/stdarg.h>
59#include <iprt/string.h>
60#include <iprt/thread.h>
61#include <iprt/timer.h>
62#include <iprt/time.h>
63
64#include "dtrace/VBoxVMM.h"
65
66
67#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
68# pragma intrinsic(_AddressOfReturnAddress)
69#endif
70
71#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
72# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
73#endif
74
75
76
77/*********************************************************************************************************************************
78* Defined Constants And Macros *
79*********************************************************************************************************************************/
80/** @def VMM_CHECK_SMAP_SETUP
81 * SMAP check setup. */
82/** @def VMM_CHECK_SMAP_CHECK
83 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
84 * it will be logged and @a a_BadExpr is executed. */
85/** @def VMM_CHECK_SMAP_CHECK2
86 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
87 * be logged, written to the VMs assertion text buffer, and @a a_BadExpr is
88 * executed. */
89#if defined(VBOX_STRICT) || 1
90# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
91# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
92 do { \
93 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
94 { \
95 RTCCUINTREG fEflCheck = ASMGetFlags(); \
96 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
97 { /* likely */ } \
98 else \
99 { \
100 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
101 a_BadExpr; \
102 } \
103 } \
104 } while (0)
105# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
106 do { \
107 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
108 { \
109 RTCCUINTREG fEflCheck = ASMGetFlags(); \
110 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
111 { /* likely */ } \
112 else \
113 { \
114 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
115 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
116 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
117 a_BadExpr; \
118 } \
119 } \
120 } while (0)
121#else
122# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
123# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
124# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
125#endif
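/*
 * Typical usage of the SMAP checks above (illustrative sketch only; the real
 * call sites further down pass whatever bail-out expression suits them):
 *
 *     VMM_CHECK_SMAP_SETUP();
 *     VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
 *     ...
 *     VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
 */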
126
127
128/*********************************************************************************************************************************
129* Internal Functions *
130*********************************************************************************************************************************/
131RT_C_DECLS_BEGIN
132#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
133extern uint64_t __udivdi3(uint64_t, uint64_t);
134extern uint64_t __umoddi3(uint64_t, uint64_t);
135#endif
136RT_C_DECLS_END
137
138
139/*********************************************************************************************************************************
140* Global Variables *
141*********************************************************************************************************************************/
142/** Drag in necessary library bits.
143 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
144PFNRT g_VMMR0Deps[] =
145{
146 (PFNRT)RTCrc32,
147 (PFNRT)RTOnce,
148#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
149 (PFNRT)__udivdi3,
150 (PFNRT)__umoddi3,
151#endif
152 NULL
153};
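/* Note (sketch of the idiom used above): the table itself is never called; merely
   referencing the functions forces the linker to keep them in VMMR0.r0 so that
   the VBoxDD*R0.r0 modules linking against us can resolve them. */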
154
155#ifdef RT_OS_SOLARIS
156/* Dependency information for the native solaris loader. */
157extern "C" { char _depends_on[] = "vboxdrv"; }
158#endif
159
160/** The result of SUPR0GetRawModeUsability(), set by ModuleInit(). */
161int g_rcRawModeUsability = VINF_SUCCESS;
162
163
164/**
165 * Initialize the module.
166 * This is called when we're first loaded.
167 *
168 * @returns 0 on success.
169 * @returns VBox status on failure.
170 * @param hMod Image handle for use in APIs.
171 */
172DECLEXPORT(int) ModuleInit(void *hMod)
173{
174 VMM_CHECK_SMAP_SETUP();
175 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
176
177#ifdef VBOX_WITH_DTRACE_R0
178 /*
179 * The first thing to do is register the static tracepoints.
180 * (Deregistration is automatic.)
181 */
182 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
183 if (RT_FAILURE(rc2))
184 return rc2;
185#endif
186 LogFlow(("ModuleInit:\n"));
187
188#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
189 /*
190 * Display the CMOS debug code.
191 */
192 ASMOutU8(0x72, 0x03);
193 uint8_t bDebugCode = ASMInU8(0x73);
194 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
195 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
196#endif
197
198 /*
199 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
200 */
201 int rc = vmmInitFormatTypes();
202 if (RT_SUCCESS(rc))
203 {
204 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
205 rc = GVMMR0Init();
206 if (RT_SUCCESS(rc))
207 {
208 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
209 rc = GMMR0Init();
210 if (RT_SUCCESS(rc))
211 {
212 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
213 rc = HMR0Init();
214 if (RT_SUCCESS(rc))
215 {
216 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
217 rc = PGMRegisterStringFormatTypes();
218 if (RT_SUCCESS(rc))
219 {
220 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
221#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
222 rc = PGMR0DynMapInit();
223#endif
224 if (RT_SUCCESS(rc))
225 {
226 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
227 rc = IntNetR0Init();
228 if (RT_SUCCESS(rc))
229 {
230#ifdef VBOX_WITH_PCI_PASSTHROUGH
231 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
232 rc = PciRawR0Init();
233#endif
234 if (RT_SUCCESS(rc))
235 {
236 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
237 rc = CPUMR0ModuleInit();
238 if (RT_SUCCESS(rc))
239 {
240#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
241 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
242 rc = vmmR0TripleFaultHackInit();
243 if (RT_SUCCESS(rc))
244#endif
245 {
246 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
247 if (RT_SUCCESS(rc))
248 {
249 g_rcRawModeUsability = SUPR0GetRawModeUsability();
250 if (g_rcRawModeUsability != VINF_SUCCESS)
251 SUPR0Printf("VMMR0!ModuleInit: SUPR0GetRawModeUsability -> %Rrc\n",
252 g_rcRawModeUsability);
253 LogFlow(("ModuleInit: returns success\n"));
254 return VINF_SUCCESS;
255 }
256 }
257
258 /*
259 * Bail out.
260 */
261#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
262 vmmR0TripleFaultHackTerm();
263#endif
264 }
265 else
266 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
267#ifdef VBOX_WITH_PCI_PASSTHROUGH
268 PciRawR0Term();
269#endif
270 }
271 else
272 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
273 IntNetR0Term();
274 }
275 else
276 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
277#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
278 PGMR0DynMapTerm();
279#endif
280 }
281 else
282 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
283 PGMDeregisterStringFormatTypes();
284 }
285 else
286 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
287 HMR0Term();
288 }
289 else
290 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
291 GMMR0Term();
292 }
293 else
294 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
295 GVMMR0Term();
296 }
297 else
298 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
299 vmmTermFormatTypes();
300 }
301 else
302 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
303
304 LogFlow(("ModuleInit: failed %Rrc\n", rc));
305 return rc;
306}
307
308
309/**
310 * Terminate the module.
311 * This is called when we're finally unloaded.
312 *
313 * @param hMod Image handle for use in APIs.
314 */
315DECLEXPORT(void) ModuleTerm(void *hMod)
316{
317 NOREF(hMod);
318 LogFlow(("ModuleTerm:\n"));
319
320 /*
321 * Terminate the CPUM module (Local APIC cleanup).
322 */
323 CPUMR0ModuleTerm();
324
325 /*
326 * Terminate the internal network service.
327 */
328 IntNetR0Term();
329
330 /*
331 * PGM (Darwin), HM and PciRaw global cleanup.
332 */
333#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
334 PGMR0DynMapTerm();
335#endif
336#ifdef VBOX_WITH_PCI_PASSTHROUGH
337 PciRawR0Term();
338#endif
339 PGMDeregisterStringFormatTypes();
340 HMR0Term();
341#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
342 vmmR0TripleFaultHackTerm();
343#endif
344
345 /*
346 * Destroy the GMM and GVMM instances.
347 */
348 GMMR0Term();
349 GVMMR0Term();
350
351 vmmTermFormatTypes();
352
353 LogFlow(("ModuleTerm: returns\n"));
354}
355
356
357/**
358 * Initiates the R0 driver for a particular VM instance.
359 *
360 * @returns VBox status code.
361 *
362 * @param pGVM The global (ring-0) VM structure.
363 * @param pVM The cross context VM structure.
364 * @param uSvnRev The SVN revision of the ring-3 part.
365 * @param uBuildType Build type indicator.
366 * @thread EMT(0)
367 */
368static int vmmR0InitVM(PGVM pGVM, PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
369{
370 VMM_CHECK_SMAP_SETUP();
371 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
372
373 /*
374 * Match the SVN revisions and build type.
375 */
376 if (uSvnRev != VMMGetSvnRev())
377 {
378 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
379 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
380 return VERR_VMM_R0_VERSION_MISMATCH;
381 }
382 if (uBuildType != vmmGetBuildType())
383 {
384 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
385 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
386 return VERR_VMM_R0_VERSION_MISMATCH;
387 }
388
389 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0 /*idCpu*/);
390 if (RT_FAILURE(rc))
391 return rc;
392
393#ifdef LOG_ENABLED
394 /*
395 * Register the EMT R0 logger instance for VCPU 0.
396 */
397 PVMCPU pVCpu = &pVM->aCpus[0];
398
399 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
400 if (pR0Logger)
401 {
402# if 0 /* testing of the logger. */
403 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
404 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
405 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
406 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
407
408 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
409 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
410 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
411 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
412
413 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
414 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
415 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
416 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
417
418 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
419 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
420 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
421 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
422 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
423 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
424
425 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
426 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
427
428 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
429 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
430 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
431# endif
432 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
433 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
434 pR0Logger->fRegistered = true;
435 }
436#endif /* LOG_ENABLED */
437
438 /*
439 * Check if the host supports high resolution timers or not.
440 */
441 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
442 && !RTTimerCanDoHighResolution())
443 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
444
445 /*
446 * Initialize the per VM data for GVMM and GMM.
447 */
448 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
449 rc = GVMMR0InitVM(pGVM);
450// if (RT_SUCCESS(rc))
451// rc = GMMR0InitPerVMData(pVM);
452 if (RT_SUCCESS(rc))
453 {
454 /*
455 * Init HM, CPUM and PGM (Darwin only).
456 */
457 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
458 rc = HMR0InitVM(pVM);
459 if (RT_SUCCESS(rc))
460 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
461 if (RT_SUCCESS(rc))
462 {
463 rc = CPUMR0InitVM(pVM);
464 if (RT_SUCCESS(rc))
465 {
466 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
467#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
468 rc = PGMR0DynMapInitVM(pVM);
469#endif
470 if (RT_SUCCESS(rc))
471 {
472 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
473 rc = EMR0InitVM(pGVM, pVM);
474 if (RT_SUCCESS(rc))
475 {
476 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
477#ifdef VBOX_WITH_PCI_PASSTHROUGH
478 rc = PciRawR0InitVM(pGVM, pVM);
479#endif
480 if (RT_SUCCESS(rc))
481 {
482 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
483 rc = GIMR0InitVM(pVM);
484 if (RT_SUCCESS(rc))
485 {
486 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
487 if (RT_SUCCESS(rc))
488 {
489 GVMMR0DoneInitVM(pGVM);
490
491 /*
492 * Collect a bit of info for the VM release log.
493 */
494 pVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
495 pVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
496
497 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
498 return rc;
499 }
500
501 /* bail out */
502 GIMR0TermVM(pVM);
503 }
504#ifdef VBOX_WITH_PCI_PASSTHROUGH
505 PciRawR0TermVM(pGVM, pVM);
506#endif
507 }
508 }
509 }
510 }
511 HMR0TermVM(pVM);
512 }
513 }
514
515 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
516 return rc;
517}
518
519
520/**
521 * Does EMT specific VM initialization.
522 *
523 * @returns VBox status code.
524 * @param pGVM The ring-0 VM structure.
525 * @param pVM The cross context VM structure.
526 * @param idCpu The EMT that's calling.
527 */
528static int vmmR0InitVMEmt(PGVM pGVM, PVM pVM, VMCPUID idCpu)
529{
530 /* Paranoia (caller checked these already). */
531 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
532 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
533
534#ifdef LOG_ENABLED
535 /*
536 * Registration of ring 0 loggers.
537 */
538 PVMCPU pVCpu = &pVM->aCpus[idCpu];
539 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
540 if ( pR0Logger
541 && !pR0Logger->fRegistered)
542 {
543 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
544 pR0Logger->fRegistered = true;
545 }
546#endif
547 RT_NOREF(pVM);
548
549 return VINF_SUCCESS;
550}
551
552
553
554/**
555 * Terminates the R0 bits for a particular VM instance.
556 *
557 * This is normally called by ring-3 as part of the VM termination process, but
558 * may alternatively be called during the support driver session cleanup when
559 * the VM object is destroyed (see GVMM).
560 *
561 * @returns VBox status code.
562 *
563 * @param pGVM The global (ring-0) VM structure.
564 * @param pVM The cross context VM structure.
565 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
566 * thread.
567 * @thread EMT(0) or session clean up thread.
568 */
569VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, PVM pVM, VMCPUID idCpu)
570{
571 /*
572 * Check EMT(0) claim if we're called from userland.
573 */
574 if (idCpu != NIL_VMCPUID)
575 {
576 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
577 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
578 if (RT_FAILURE(rc))
579 return rc;
580 }
581
582#ifdef VBOX_WITH_PCI_PASSTHROUGH
583 PciRawR0TermVM(pGVM, pVM);
584#endif
585
586 /*
587 * Tell GVMM what we're up to and check that we only do this once.
588 */
589 if (GVMMR0DoingTermVM(pGVM))
590 {
591 GIMR0TermVM(pVM);
592
593 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
594 * here to make sure we don't leak any shared pages if we crash... */
595#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
596 PGMR0DynMapTermVM(pVM);
597#endif
598 HMR0TermVM(pVM);
599 }
600
601 /*
602 * Deregister the logger.
603 */
604 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
605 return VINF_SUCCESS;
606}
607
608
609/**
610 * An interrupt or unhalt force flag is set, deal with it.
611 *
612 * @returns VINF_SUCCESS (or VINF_EM_HALT).
613 * @param pVCpu The cross context virtual CPU structure.
614 * @param uMWait Result from EMMonitorWaitIsActive().
615 * @param enmInterruptibility Guest CPU interruptibility level.
616 */
617static int vmmR0DoHaltInterrupt(PVMCPU pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
618{
619 Assert(!TRPMHasTrap(pVCpu));
620
621 /*
622 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
623 */
624 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
625 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
626 {
627 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_INT_INHIBITED)
628 {
629 uint8_t u8Interrupt = 0;
630 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
631 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
632 if (RT_SUCCESS(rc))
633 {
634 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
635
636 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
637 AssertRCSuccess(rc);
638 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
639 return rc;
640 }
641 }
642 }
643 /*
644 * SMI is not implemented yet, at least not here.
645 */
646 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
647 {
648 return VINF_EM_HALT;
649 }
650 /*
651 * NMI.
652 */
653 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
654 {
655 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
656 {
657 /** @todo later. */
658 return VINF_EM_HALT;
659 }
660 }
661
662 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
663 {
664 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
665 return VINF_SUCCESS;
666 }
667 if (uMWait > 1)
668 {
669 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
670 return VINF_SUCCESS;
671 }
672
673 return VINF_EM_HALT;
674}
675
676
677/**
678 * This does one round of vmR3HaltGlobal1Halt().
679 *
680 * The rationale here is that we'll reduce latency in interrupt situations if we
681 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
682 * MWAIT), but do one round of blocking here instead and hope the interrupt is
683 * raised in the meanwhile.
684 *
685 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
686 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
687 * ring-0 call (unless we're too close to a timer event). When the interrupt
688 * wakes us up, we'll return from ring-0 and EM will by instinct do a
689 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
690 * back to VMMR0EntryFast().
691 *
692 * @returns VINF_SUCCESS or VINF_EM_HALT.
693 * @param pGVM The ring-0 VM structure.
694 * @param pVM The cross context VM structure.
695 * @param pGVCpu The ring-0 virtual CPU structure.
696 * @param pVCpu The cross context virtual CPU structure.
697 *
698 * @todo r=bird: All the blocking/waiting and EMT management should move out of
699 * the VM module, probably to VMM. Then this would be more weird wrt
700 * parameters and statistics.
701 */
702static int vmmR0DoHalt(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, PVMCPU pVCpu)
703{
704 Assert(pVCpu == pGVCpu->pVCpu);
705
706 /*
707 * Do spin stat historization.
708 */
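 /* (Roughly: every 256th halt the success/to-ring-3 counters are rebiased so
    that recent behaviour dominates the spin-vs-block heuristic further down.) */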
709 if (++pVCpu->vmm.s.cR0Halts & 0xff)
710 { /* likely */ }
711 else if (pVCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3)
712 {
713 pVCpu->vmm.s.cR0HaltsSucceeded = 2;
714 pVCpu->vmm.s.cR0HaltsToRing3 = 0;
715 }
716 else
717 {
718 pVCpu->vmm.s.cR0HaltsSucceeded = 0;
719 pVCpu->vmm.s.cR0HaltsToRing3 = 2;
720 }
721
722 /*
723 * Flags that make us go to ring-3.
724 */
725 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
726 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
727 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
728 | VM_FF_PGM_NO_MEMORY | VM_FF_REM_HANDLER_NOTIFY | VM_FF_DEBUG_SUSPEND;
729 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
730 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
731 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
732 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM | VMCPU_FF_INTERRUPT_NESTED_GUEST /*?*/
733 | VMCPU_FF_VMX_PREEMPT_TIMER /*?*/ | VMCPU_FF_VMX_APIC_WRITE /*?*/ | VMCPU_FF_VMX_MTF /*?*/
734#ifdef VBOX_WITH_RAW_MODE
735 | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT
736 | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_CSAM_SCAN_PAGE | VMCPU_FF_CSAM_PENDING_ACTION
737 | VMCPU_FF_CPUM
738#endif
739 ;
740
741 /*
742 * Check preconditions.
743 */
744 unsigned const uMWait = EMMonitorWaitIsActive(pVCpu);
745 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pVCpu);
746 if ( pVCpu->vmm.s.fMayHaltInRing0
747 && !TRPMHasTrap(pVCpu)
748 && ( enmInterruptibility <= CPUMINTERRUPTIBILITY_INT_INHIBITED
749 || uMWait > 1))
750 {
751 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs)
752 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
753 {
754 /*
755 * Interrupts pending already?
756 */
757 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
758 APICUpdatePendingInterrupts(pVCpu);
759
760 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
761 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
762 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
763 ASMNopPause();
764
765 /*
766 * Check out how long till the next timer event.
767 */
768 uint64_t u64Delta;
769 uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
770
771 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs)
772 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
773 {
774 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
775 APICUpdatePendingInterrupts(pVCpu);
776
777 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
778 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
779 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
780
781 /*
782 * Wait if there is enough time to the next timer event.
783 */
784 if (u64Delta >= pVCpu->vmm.s.cNsSpinBlockThreshold)
785 {
786 /* If there are a few other CPU cores around, we will procrastinate a
787 little before going to sleep, hoping for some device raising an
788 interrupt or similar. Though, the best thing here would be to
789 dynamically adjust the spin count according to its usefulness or
790 something... */
791 if ( pVCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3
792 && RTMpGetOnlineCount() >= 4)
793 {
794 /** @todo Figure out how we can skip this if it hasn't helped recently...
795 * @bugref{9172#c12} */
796 uint32_t cSpinLoops = 42;
797 while (cSpinLoops-- > 0)
798 {
799 ASMNopPause();
800 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
801 APICUpdatePendingInterrupts(pVCpu);
802 ASMNopPause();
803 if (VM_FF_IS_ANY_SET(pVM, fVmFFs))
804 {
805 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3FromSpin);
806 return VINF_EM_HALT;
807 }
808 ASMNopPause();
809 if (VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
810 {
811 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3FromSpin);
812 return VINF_EM_HALT;
813 }
814 ASMNopPause();
815 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
816 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
817 {
818 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExecFromSpin);
819 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
820 }
821 ASMNopPause();
822 }
823 }
824
825 /* Block. We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
826 knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here). */
827 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);
828 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
829 int rc = GVMMR0SchedHalt(pGVM, pVM, pGVCpu, u64GipTime);
830 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
831 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
832 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
833 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
834 if ( rc == VINF_SUCCESS
835 || rc == VERR_INTERRUPTED)
836
837 {
838 /* Keep some stats like ring-3 does. */
839 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
840 if (cNsOverslept > 50000)
841 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
842 else if (cNsOverslept < -50000)
843 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
844 else
845 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
846
847 /*
848 * Recheck whether we can resume execution or have to go to ring-3.
849 */
850 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs)
851 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
852 {
853 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
854 APICUpdatePendingInterrupts(pVCpu);
855 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
856 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
857 {
858 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExecFromBlock);
859 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
860 }
861 }
862 }
863 }
864 }
865 }
866 }
867 return VINF_EM_HALT;
868}
869
870
871/**
872 * VMM ring-0 thread-context callback.
873 *
874 * This does common HM state updating and calls the HM-specific thread-context
875 * callback.
876 *
877 * @param enmEvent The thread-context event.
878 * @param pvUser Opaque pointer to the VMCPU.
879 *
880 * @thread EMT(pvUser)
881 */
882static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
883{
884 PVMCPU pVCpu = (PVMCPU)pvUser;
885
886 switch (enmEvent)
887 {
888 case RTTHREADCTXEVENT_IN:
889 {
890 /*
891 * Linux may call us with preemption enabled (really!) but technically we
892 * cannot get preempted here, otherwise we end up in an infinite recursion
893 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
894 * ad infinitum). Let's just disable preemption for now...
895 */
896 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
897 * preemption after doing the callout (one or two functions up the
898 * call chain). */
899 /** @todo r=ramshankar: See @bugref{5313#c30}. */
900 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
901 RTThreadPreemptDisable(&ParanoidPreemptState);
902
903 /* We need to update the VCPU <-> host CPU mapping. */
904 RTCPUID idHostCpu;
905 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
906 pVCpu->iHostCpuSet = iHostCpuSet;
907 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
908
909 /* In the very unlikely event that the GIP delta for the CPU we've been
910 rescheduled onto needs calculating, try to force a return to ring-3.
911 We unfortunately cannot do the measurements right here. */
912 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
913 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
914
915 /* Invoke the HM-specific thread-context callback. */
916 HMR0ThreadCtxCallback(enmEvent, pvUser);
917
918 /* Restore preemption. */
919 RTThreadPreemptRestore(&ParanoidPreemptState);
920 break;
921 }
922
923 case RTTHREADCTXEVENT_OUT:
924 {
925 /* Invoke the HM-specific thread-context callback. */
926 HMR0ThreadCtxCallback(enmEvent, pvUser);
927
928 /*
929 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
930 * have the same host CPU associated with them.
931 */
932 pVCpu->iHostCpuSet = UINT32_MAX;
933 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
934 break;
935 }
936
937 default:
938 /* Invoke the HM-specific thread-context callback. */
939 HMR0ThreadCtxCallback(enmEvent, pvUser);
940 break;
941 }
942}
943
944
945/**
946 * Creates thread switching hook for the current EMT thread.
947 *
948 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
949 * platform does not implement switcher hooks, no hooks will be created and the
950 * member is set to NIL_RTTHREADCTXHOOK.
951 *
952 * @returns VBox status code.
953 * @param pVCpu The cross context virtual CPU structure.
954 * @thread EMT(pVCpu)
955 */
956VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
957{
958 VMCPU_ASSERT_EMT(pVCpu);
959 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
960
961#if 1 /* To disable this stuff change to zero. */
962 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
963 if (RT_SUCCESS(rc))
964 return rc;
965#else
966 RT_NOREF(vmmR0ThreadCtxCallback);
967 int rc = VERR_NOT_SUPPORTED;
968#endif
969
970 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
971 if (rc == VERR_NOT_SUPPORTED)
972 return VINF_SUCCESS;
973
974 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
975 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
976}
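/*
 * Hook lifecycle sketch (for orientation, based on the code in this file):
 *   - GVMMR0CreateVM / GVMMR0RegisterVCpu create the hook via
 *     VMMR0ThreadCtxHookCreateForEmt().
 *   - VMMR0_DO_HM_RUN enables it with RTThreadCtxHookEnable() before entering
 *     HM context and disables it again before returning to ring-3 (see also
 *     VMMR0ThreadCtxHookDisable() below).
 *   - VMMR0ThreadCtxHookDestroyForEmt() finally tears it down.
 */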
977
978
979/**
980 * Destroys the thread switching hook for the specified VCPU.
981 *
982 * @param pVCpu The cross context virtual CPU structure.
983 * @remarks Can be called from any thread.
984 */
985VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
986{
987 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
988 AssertRC(rc);
989 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
990}
991
992
993/**
994 * Disables the thread switching hook for this VCPU (if we got one).
995 *
996 * @param pVCpu The cross context virtual CPU structure.
997 * @thread EMT(pVCpu)
998 *
999 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
1000 * this call. This means you have to be careful with what you do!
1001 */
1002VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
1003{
1004 /*
1005 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1006 * @bugref{7726#c19} explains the need for this trick:
1007 *
1008 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
1009 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disable context hooks during
1010 * longjmp & normal return to ring-3, which opens a window where we may be
1011 * rescheduled without changing VMCPU::idHostCpu, causing confusion if
1012 * the CPU starts executing a different EMT. Both functions first disable
1013 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1014 * an opening for getting preempted.
1015 */
1016 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1017 * all the time. */
1018 /** @todo move this into the context hook disabling if(). */
1019 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1020
1021 /*
1022 * Disable the context hook, if we got one.
1023 */
1024 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1025 {
1026 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1027 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1028 AssertRC(rc);
1029 }
1030}
1031
1032
1033/**
1034 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
1035 *
1036 * @returns true if registered, false otherwise.
1037 * @param pVCpu The cross context virtual CPU structure.
1038 */
1039DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
1040{
1041 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
1042}
1043
1044
1045/**
1046 * Whether thread-context hooks are registered for this VCPU.
1047 *
1048 * @returns true if registered, false otherwise.
1049 * @param pVCpu The cross context virtual CPU structure.
1050 */
1051VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
1052{
1053 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1054}
1055
1056
1057#ifdef VBOX_WITH_STATISTICS
1058/**
1059 * Record return code statistics.
1060 * @param pVM The cross context VM structure.
1061 * @param pVCpu The cross context virtual CPU structure.
1062 * @param rc The status code.
1063 */
1064static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
1065{
1066 /*
1067 * Collect statistics.
1068 */
1069 switch (rc)
1070 {
1071 case VINF_SUCCESS:
1072 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1073 break;
1074 case VINF_EM_RAW_INTERRUPT:
1075 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1076 break;
1077 case VINF_EM_RAW_INTERRUPT_HYPER:
1078 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1079 break;
1080 case VINF_EM_RAW_GUEST_TRAP:
1081 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1082 break;
1083 case VINF_EM_RAW_RING_SWITCH:
1084 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1085 break;
1086 case VINF_EM_RAW_RING_SWITCH_INT:
1087 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1088 break;
1089 case VINF_EM_RAW_STALE_SELECTOR:
1090 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1091 break;
1092 case VINF_EM_RAW_IRET_TRAP:
1093 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1094 break;
1095 case VINF_IOM_R3_IOPORT_READ:
1096 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1097 break;
1098 case VINF_IOM_R3_IOPORT_WRITE:
1099 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1100 break;
1101 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1102 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1103 break;
1104 case VINF_IOM_R3_MMIO_READ:
1105 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1106 break;
1107 case VINF_IOM_R3_MMIO_WRITE:
1108 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1109 break;
1110 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1111 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1112 break;
1113 case VINF_IOM_R3_MMIO_READ_WRITE:
1114 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1115 break;
1116 case VINF_PATM_HC_MMIO_PATCH_READ:
1117 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1118 break;
1119 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1120 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1121 break;
1122 case VINF_CPUM_R3_MSR_READ:
1123 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1124 break;
1125 case VINF_CPUM_R3_MSR_WRITE:
1126 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1127 break;
1128 case VINF_EM_RAW_EMULATE_INSTR:
1129 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1130 break;
1131 case VINF_PATCH_EMULATE_INSTR:
1132 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1133 break;
1134 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1135 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1136 break;
1137 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1138 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1139 break;
1140 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1141 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1142 break;
1143 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1144 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1145 break;
1146 case VINF_CSAM_PENDING_ACTION:
1147 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1148 break;
1149 case VINF_PGM_SYNC_CR3:
1150 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1151 break;
1152 case VINF_PATM_PATCH_INT3:
1153 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1154 break;
1155 case VINF_PATM_PATCH_TRAP_PF:
1156 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1157 break;
1158 case VINF_PATM_PATCH_TRAP_GP:
1159 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1160 break;
1161 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1162 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1163 break;
1164 case VINF_EM_RESCHEDULE_REM:
1165 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1166 break;
1167 case VINF_EM_RAW_TO_R3:
1168 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1169 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1170 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1171 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1172 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1173 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1174 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1175 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1176 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1177 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1178 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1179 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1180 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1181 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1182 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1183 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1184 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1185 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1186 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1187 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1188 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1189 else
1190 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1191 break;
1192
1193 case VINF_EM_RAW_TIMER_PENDING:
1194 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1195 break;
1196 case VINF_EM_RAW_INTERRUPT_PENDING:
1197 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1198 break;
1199 case VINF_VMM_CALL_HOST:
1200 switch (pVCpu->vmm.s.enmCallRing3Operation)
1201 {
1202 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1203 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1204 break;
1205 case VMMCALLRING3_PDM_LOCK:
1206 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1207 break;
1208 case VMMCALLRING3_PGM_POOL_GROW:
1209 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1210 break;
1211 case VMMCALLRING3_PGM_LOCK:
1212 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1213 break;
1214 case VMMCALLRING3_PGM_MAP_CHUNK:
1215 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1216 break;
1217 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1218 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1219 break;
1220 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
1221 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
1222 break;
1223 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1224 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1225 break;
1226 case VMMCALLRING3_VM_SET_ERROR:
1227 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1228 break;
1229 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1230 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1231 break;
1232 case VMMCALLRING3_VM_R0_ASSERTION:
1233 default:
1234 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1235 break;
1236 }
1237 break;
1238 case VINF_PATM_DUPLICATE_FUNCTION:
1239 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1240 break;
1241 case VINF_PGM_CHANGE_MODE:
1242 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1243 break;
1244 case VINF_PGM_POOL_FLUSH_PENDING:
1245 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1246 break;
1247 case VINF_EM_PENDING_REQUEST:
1248 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1249 break;
1250 case VINF_EM_HM_PATCH_TPR_INSTR:
1251 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1252 break;
1253 default:
1254 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1255 break;
1256 }
1257}
1258#endif /* VBOX_WITH_STATISTICS */
1259
1260
1261/**
1262 * The Ring 0 entry point, called by the fast-ioctl path.
1263 *
1264 * @param pGVM The global (ring-0) VM structure.
1265 * @param pVM The cross context VM structure.
1266 * The return code is stored in pVM->vmm.s.iLastGZRc.
1267 * @param idCpu The Virtual CPU ID of the calling EMT.
1268 * @param enmOperation Which operation to execute.
1269 * @remarks Assume called with interrupts _enabled_.
1270 */
1271VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1272{
1273 /*
1274 * Validation.
1275 */
1276 if ( idCpu < pGVM->cCpus
1277 && pGVM->cCpus == pVM->cCpus)
1278 { /*likely*/ }
1279 else
1280 {
1281 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x/%#x\n", idCpu, pGVM->cCpus, pVM->cCpus);
1282 return;
1283 }
1284
1285 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1286 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1287 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1288 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1289 && pVCpu->hNativeThreadR0 == hNativeThread))
1290 { /* likely */ }
1291 else
1292 {
1293 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pVCpu->hNativeThreadR0=%p\n",
1294 idCpu, hNativeThread, pGVCpu->hEMT, pVCpu->hNativeThreadR0);
1295 return;
1296 }
1297
1298 /*
1299 * SMAP fun.
1300 */
1301 VMM_CHECK_SMAP_SETUP();
1302 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1303
1304 /*
1305 * Perform requested operation.
1306 */
1307 switch (enmOperation)
1308 {
1309 /*
1310 * Switch to GC and run guest raw mode code.
1311 * Disable interrupts before doing the world switch.
1312 */
1313 case VMMR0_DO_RAW_RUN:
1314 {
1315#ifdef VBOX_WITH_RAW_MODE
1316# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1317 /* Some safety precautions first. */
1318 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1319 {
1320 pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
1321 break;
1322 }
1323# endif
1324 if (RT_SUCCESS(g_rcRawModeUsability))
1325 { /* likely */ }
1326 else
1327 {
1328 pVCpu->vmm.s.iLastGZRc = g_rcRawModeUsability;
1329 break;
1330 }
1331
1332 /*
1333 * Disable preemption.
1334 */
1335 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1336 RTThreadPreemptDisable(&PreemptState);
1337
1338 /*
1339 * Get the host CPU identifiers, make sure they are valid and that
1340 * we've got a TSC delta for the CPU.
1341 */
1342 RTCPUID idHostCpu;
1343 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1344 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1345 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1346 {
1347 /*
1348 * Commit the CPU identifiers and update the periodic preemption timer if it's active.
1349 */
1350# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1351 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1352# endif
1353 pVCpu->iHostCpuSet = iHostCpuSet;
1354 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1355
1356 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1357 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1358
1359 /*
1360 * We might need to disable VT-x if the active switcher turns off paging.
1361 */
1362 bool fVTxDisabled;
1363 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1364 if (RT_SUCCESS(rc))
1365 {
1366 /*
1367 * Disable interrupts and run raw-mode code. The loop is for efficiently
1368 * dispatching tracepoints that fired in raw-mode context.
1369 */
1370 RTCCUINTREG uFlags = ASMIntDisableFlags();
1371
1372 for (;;)
1373 {
1374 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
1375 TMNotifyStartOfExecution(pVCpu);
1376
1377 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1378 pVCpu->vmm.s.iLastGZRc = rc;
1379
1380 TMNotifyEndOfExecution(pVCpu);
1381 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1382
1383 if (rc != VINF_VMM_CALL_TRACER)
1384 break;
1385 SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
1386 }
1387
1388 /*
1389 * Re-enable VT-x before we dispatch any pending host interrupts and
1390 * re-enable interrupts.
1391 */
1392 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1393
1394 if ( rc == VINF_EM_RAW_INTERRUPT
1395 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1396 TRPMR0DispatchHostInterrupt(pVM);
1397
1398 ASMSetFlags(uFlags);
1399
1400 /* Fire dtrace probe and collect statistics. */
1401 VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1402# ifdef VBOX_WITH_STATISTICS
1403 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1404 vmmR0RecordRC(pVM, pVCpu, rc);
1405# endif
1406 }
1407 else
1408 pVCpu->vmm.s.iLastGZRc = rc;
1409
1410 /*
1411 * Invalidate the host CPU identifiers as we restore preemption.
1412 */
1413 pVCpu->iHostCpuSet = UINT32_MAX;
1414 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1415
1416 RTThreadPreemptRestore(&PreemptState);
1417 }
1418 /*
1419 * Invalid CPU set index or TSC delta in need of measuring.
1420 */
1421 else
1422 {
1423 RTThreadPreemptRestore(&PreemptState);
1424 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1425 {
1426 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1427 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1428 0 /*default cTries*/);
1429 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1430 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1431 else
1432 pVCpu->vmm.s.iLastGZRc = rc;
1433 }
1434 else
1435 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1436 }
1437
1438#else /* !VBOX_WITH_RAW_MODE */
1439 pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_NOT_SUPPORTED;
1440#endif
1441 break;
1442 }
1443
1444 /*
1445 * Run guest code using the available hardware acceleration technology.
1446 */
1447 case VMMR0_DO_HM_RUN:
1448 {
1449 for (;;) /* hlt loop */
1450 {
1451 /*
1452 * Disable preemption.
1453 */
1454 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1455 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1456 RTThreadPreemptDisable(&PreemptState);
1457
1458 /*
1459 * Get the host CPU identifiers, make sure they are valid and that
1460 * we've got a TSC delta for the CPU.
1461 */
1462 RTCPUID idHostCpu;
1463 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1464 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1465 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1466 {
1467 pVCpu->iHostCpuSet = iHostCpuSet;
1468 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1469
1470 /*
1471 * Update the periodic preemption timer if it's active.
1472 */
1473 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1474 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1475 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1476
1477#ifdef VMM_R0_TOUCH_FPU
1478 /*
1479 * Make sure we've got the FPU state loaded so we don't need to clear
1480 * CR0.TS and get out of sync with the host kernel when loading the guest
1481 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1482 */
1483 CPUMR0TouchHostFpu();
1484#endif
1485 int rc;
1486 bool fPreemptRestored = false;
1487 if (!HMR0SuspendPending())
1488 {
1489 /*
1490 * Enable the context switching hook.
1491 */
1492 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1493 {
1494 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1495 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1496 }
1497
1498 /*
1499 * Enter HM context.
1500 */
1501 rc = HMR0Enter(pVCpu);
1502 if (RT_SUCCESS(rc))
1503 {
1504 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1505
1506 /*
1507 * When preemption hooks are in place, enable preemption now that
1508 * we're in HM context.
1509 */
1510 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1511 {
1512 fPreemptRestored = true;
1513 RTThreadPreemptRestore(&PreemptState);
1514 }
1515
1516 /*
1517 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1518 */
1519 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1520 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1521 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1522
1523 /*
1524 * Assert sanity on the way out. Using manual assertion code here as normal
1525 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1526 */
1527 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1528 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1529 {
1530 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1531 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1532 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1533 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1534 }
1535 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1536 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1537 {
1538 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1539 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1540 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1541 rc = VERR_INVALID_STATE;
1542 }
1543
1544 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1545 }
1546 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1547
1548 /*
1549 * Invalidate the host CPU identifiers before we disable the context
1550 * hook / restore preemption.
1551 */
1552 pVCpu->iHostCpuSet = UINT32_MAX;
1553 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1554
1555 /*
1556 * Disable context hooks. Due to unresolved cleanup issues, we
1557 * cannot leave the hooks enabled when we return to ring-3.
1558 *
1559 * Note! At the moment HM may also have disabled the hook
1560 * when we get here, but the IPRT API handles that.
1561 */
1562 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1563 {
1564 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1565 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1566 }
1567 }
1568 /*
1569 * The system is about to go into suspend mode; go back to ring 3.
1570 */
1571 else
1572 {
1573 rc = VINF_EM_RAW_INTERRUPT;
1574 pVCpu->iHostCpuSet = UINT32_MAX;
1575 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1576 }
1577
1578 /** @todo When HM stops messing with the context hook state, we'll disable
1579 * preemption again before the RTThreadCtxHookDisable call. */
1580 if (!fPreemptRestored)
1581 RTThreadPreemptRestore(&PreemptState);
1582
1583 pVCpu->vmm.s.iLastGZRc = rc;
1584
1585 /* Fire dtrace probe and collect statistics. */
1586 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1587#ifdef VBOX_WITH_STATISTICS
1588 vmmR0RecordRC(pVM, pVCpu, rc);
1589#endif
1590#if 1
1591 /*
1592 * If this is a halt, try to handle it here in ring-0 before returning to ring-3.
1593 */
1594 if (rc != VINF_EM_HALT)
1595 { /* we're not in a hurry for a HLT, so prefer this path */ }
1596 else
1597 {
1598 pVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pVM, pGVCpu, pVCpu);
1599 if (rc == VINF_SUCCESS)
1600 {
1601 pVCpu->vmm.s.cR0HaltsSucceeded++;
1602 continue;
1603 }
1604 pVCpu->vmm.s.cR0HaltsToRing3++;
1605 }
1606#endif
1607 }
1608 /*
1609 * Invalid CPU set index or TSC delta in need of measuring.
1610 */
1611 else
1612 {
1613 pVCpu->iHostCpuSet = UINT32_MAX;
1614 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1615 RTThreadPreemptRestore(&PreemptState);
1616 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1617 {
1618 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1619 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1620 0 /*default cTries*/);
1621 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1622 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1623 else
1624 pVCpu->vmm.s.iLastGZRc = rc;
1625 }
1626 else
1627 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1628 }
1629 break;
1630
1631 } /* halt loop. */
1632 break;
1633 }
1634
1635#ifdef VBOX_WITH_NEM_R0
1636# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1637 case VMMR0_DO_NEM_RUN:
1638 {
1639 /*
1640 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1641 */
1642 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1643 int rc = vmmR0CallRing3SetJmp2(&pVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1644 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1645 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1646
1647 pVCpu->vmm.s.iLastGZRc = rc;
1648
1649 /*
1650 * Fire dtrace probe and collect statistics.
1651 */
1652 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1653# ifdef VBOX_WITH_STATISTICS
1654 vmmR0RecordRC(pVM, pVCpu, rc);
1655# endif
1656 break;
1657 }
1658# endif
1659#endif
1660
1661
1662 /*
1663 * For profiling.
1664 */
1665 case VMMR0_DO_NOP:
1666 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1667 break;
1668
1669 /*
1670 * Shouldn't happen.
1671 */
1672 default:
1673 AssertMsgFailed(("%#x\n", enmOperation));
1674 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1675 break;
1676 }
1677 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1678}
1679
1680
1681/**
1682 * Validates a session or VM session argument.
1683 *
1684 * @returns true / false accordingly.
1685 * @param pVM The cross context VM structure.
1686 * @param pClaimedSession The session claim to validate.
1687 * @param pSession The session argument.
1688 */
1689DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1690{
1691 /* This must be set! */
1692 if (!pSession)
1693 return false;
1694
1695 /* Only one out of the two. */
1696 if (pVM && pClaimedSession)
1697 return false;
1698 if (pVM)
1699 pClaimedSession = pVM->pSession;
1700 return pClaimedSession == pSession;
1701}
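/*
 * Accepted combinations (sketch of the checks above): pSession must always be
 * valid; passing both pVM and pClaimedSession is rejected; with a pVM the
 * session must match pVM->pSession, otherwise it must match pClaimedSession.
 */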
1702
1703
1704/**
1705 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1706 * called through a longjmp so we can exit safely on failure.
1707 *
1708 * @returns VBox status code.
1709 * @param pGVM The global (ring-0) VM structure.
1710 * @param pVM The cross context VM structure.
1711 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1712 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1713 * @param enmOperation Which operation to execute.
1714 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1715 * The support driver validates this if it's present.
1716 * @param u64Arg Some simple constant argument.
1717 * @param pSession The session of the caller.
1718 *
1719 * @remarks Assume called with interrupts _enabled_.
1720 */
1721static int vmmR0EntryExWorker(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1722 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1723{
1724 /*
1725 * Validate pGVM, pVM and idCpu for consistency and validity.
1726 */
1727 if ( pGVM != NULL
1728 || pVM != NULL)
1729 {
1730 if (RT_LIKELY( RT_VALID_PTR(pGVM)
1731 && RT_VALID_PTR(pVM)
1732 && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0))
1733 { /* likely */ }
1734 else
1735 {
1736 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p and/or pVM=%p! (op=%d)\n", pGVM, pVM, enmOperation);
1737 return VERR_INVALID_POINTER;
1738 }
1739
1740 if (RT_LIKELY(pGVM->pVM == pVM))
1741 { /* likely */ }
1742 else
1743 {
1744 SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM->pVM=%p\n", pVM, pGVM->pVM);
1745 return VERR_INVALID_PARAMETER;
1746 }
1747
1748 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1749 { /* likely */ }
1750 else
1751 {
1752 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1753 return VERR_INVALID_PARAMETER;
1754 }
1755
1756 if (RT_LIKELY( pVM->enmVMState >= VMSTATE_CREATING
1757 && pVM->enmVMState <= VMSTATE_TERMINATED
1758 && pVM->cCpus == pGVM->cCpus
1759 && pVM->pSession == pSession
1760 && pVM->pVMR0 == pVM))
1761 { /* likely */ }
1762 else
1763 {
1764 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{.enmVMState=%d, .cCpus=%#x(==%#x), .pSession=%p(==%p), .pVMR0=%p(==%p)}! (op=%d)\n",
1765 pVM, pVM->enmVMState, pVM->cCpus, pGVM->cCpus, pVM->pSession, pSession, pVM->pVMR0, pVM, enmOperation);
1766 return VERR_INVALID_POINTER;
1767 }
1768 }
1769 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1770 { /* likely */ }
1771 else
1772 {
1773 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1774 return VERR_INVALID_PARAMETER;
1775 }
1776
1777 /*
1778 * SMAP fun.
1779 */
1780 VMM_CHECK_SMAP_SETUP();
1781 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1782
1783 /*
1784 * Process the request.
1785 */
1786 int rc;
1787 switch (enmOperation)
1788 {
1789 /*
1790 * GVM requests
1791 */
1792 case VMMR0_DO_GVMM_CREATE_VM:
1793 if (pGVM == NULL && pVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1794 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1795 else
1796 rc = VERR_INVALID_PARAMETER;
1797 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1798 break;
1799
1800 case VMMR0_DO_GVMM_DESTROY_VM:
1801 if (pReqHdr == NULL && u64Arg == 0)
1802 rc = GVMMR0DestroyVM(pGVM, pVM);
1803 else
1804 rc = VERR_INVALID_PARAMETER;
1805 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1806 break;
1807
1808 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1809 if (pGVM != NULL && pVM != NULL)
1810 rc = GVMMR0RegisterVCpu(pGVM, pVM, idCpu);
1811 else
1812 rc = VERR_INVALID_PARAMETER;
1813 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1814 break;
1815
1816 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1817 if (pGVM != NULL && pVM != NULL)
1818 rc = GVMMR0DeregisterVCpu(pGVM, pVM, idCpu);
1819 else
1820 rc = VERR_INVALID_PARAMETER;
1821 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1822 break;
1823
1824 case VMMR0_DO_GVMM_SCHED_HALT:
1825 if (pReqHdr)
1826 return VERR_INVALID_PARAMETER;
1827 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1828 rc = GVMMR0SchedHaltReq(pGVM, pVM, idCpu, u64Arg);
1829 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1830 break;
1831
1832 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1833 if (pReqHdr || u64Arg)
1834 return VERR_INVALID_PARAMETER;
1835 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1836 rc = GVMMR0SchedWakeUp(pGVM, pVM, idCpu);
1837 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1838 break;
1839
1840 case VMMR0_DO_GVMM_SCHED_POKE:
1841 if (pReqHdr || u64Arg)
1842 return VERR_INVALID_PARAMETER;
1843 rc = GVMMR0SchedPoke(pGVM, pVM, idCpu);
1844 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1845 break;
1846
1847 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1848 if (u64Arg)
1849 return VERR_INVALID_PARAMETER;
1850 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1851 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1852 break;
1853
1854 case VMMR0_DO_GVMM_SCHED_POLL:
1855 if (pReqHdr || u64Arg > 1)
1856 return VERR_INVALID_PARAMETER;
1857 rc = GVMMR0SchedPoll(pGVM, pVM, idCpu, !!u64Arg);
1858 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1859 break;
1860
1861 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1862 if (u64Arg)
1863 return VERR_INVALID_PARAMETER;
1864 rc = GVMMR0QueryStatisticsReq(pGVM, pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1865 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1866 break;
1867
1868 case VMMR0_DO_GVMM_RESET_STATISTICS:
1869 if (u64Arg)
1870 return VERR_INVALID_PARAMETER;
1871 rc = GVMMR0ResetStatisticsReq(pGVM, pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1872 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1873 break;
1874
1875 /*
1876 * Initialize the R0 part of a VM instance.
1877 */
1878 case VMMR0_DO_VMMR0_INIT:
1879 rc = vmmR0InitVM(pGVM, pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1880 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1881 break;
1882
1883 /*
1884 * Does EMT-specific ring-0 init.
1885 */
1886 case VMMR0_DO_VMMR0_INIT_EMT:
1887 rc = vmmR0InitVMEmt(pGVM, pVM, idCpu);
1888 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1889 break;
1890
1891 /*
1892 * Terminate the R0 part of a VM instance.
1893 */
1894 case VMMR0_DO_VMMR0_TERM:
1895 rc = VMMR0TermVM(pGVM, pVM, 0 /*idCpu*/);
1896 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1897 break;
1898
1899 /*
1900 * Attempt to enable HM mode and check the current setting.
1901 */
1902 case VMMR0_DO_HM_ENABLE:
1903 rc = HMR0EnableAllCpus(pVM);
1904 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1905 break;
1906
1907 /*
1908 * Set up the hardware-accelerated session.
1909 */
1910 case VMMR0_DO_HM_SETUP_VM:
1911 rc = HMR0SetupVM(pVM);
1912 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1913 break;
1914
1915 /*
1916 * Switch to RC to execute a hypervisor function.
1917 */
1918 case VMMR0_DO_CALL_HYPERVISOR:
1919 {
1920#ifdef VBOX_WITH_RAW_MODE
1921 /*
1922 * Validate input / context.
1923 */
1924 if (RT_UNLIKELY(idCpu != 0))
1925 return VERR_INVALID_CPU_ID;
1926 if (RT_UNLIKELY(pVM->cCpus != 1))
1927 return VERR_INVALID_PARAMETER;
1928 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1929# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1930 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1931 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1932# endif
1933 if (RT_FAILURE(g_rcRawModeUsability))
1934 return g_rcRawModeUsability;
1935
1936 /*
1937 * Disable interrupts.
1938 */
1939 RTCCUINTREG fFlags = ASMIntDisableFlags();
1940
1941 /*
1942 * Get the host CPU identifiers, make sure they are valid and that
1943 * we've got a TSC delta for the CPU.
1944 */
1945 RTCPUID idHostCpu;
1946 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1947 if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
1948 {
1949 ASMSetFlags(fFlags);
1950 return VERR_INVALID_CPU_INDEX;
1951 }
1952 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1953 {
1954 ASMSetFlags(fFlags);
1955 rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1956 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1957 0 /*default cTries*/);
1958 if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
1959 {
1960 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1961 return rc;
1962 }
1963 }
1964
1965 /*
1966 * Commit the CPU identifiers.
1967 */
1968# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1969 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1970# endif
1971 pVCpu->iHostCpuSet = iHostCpuSet;
1972 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1973
1974 /*
1975 * We might need to disable VT-x if the active switcher turns off paging.
1976 */
1977 bool fVTxDisabled;
1978 rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1979 if (RT_SUCCESS(rc))
1980 {
1981 /*
1982 * Go through the wormhole...
1983 */
1984 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1985
1986 /*
1987 * Re-enable VT-x before we dispatch any pending host interrupts.
1988 */
1989 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1990
1991 if ( rc == VINF_EM_RAW_INTERRUPT
1992 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1993 TRPMR0DispatchHostInterrupt(pVM);
1994 }
1995
1996 /*
1997 * Invalidate the host CPU identifiers as we restore interrupts.
1998 */
1999 pVCpu->iHostCpuSet = UINT32_MAX;
2000 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
2001 ASMSetFlags(fFlags);
2002
2003#else /* !VBOX_WITH_RAW_MODE */
2004 rc = VERR_RAW_MODE_NOT_SUPPORTED;
2005#endif
2006 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2007 break;
2008 }
2009
2010 /*
2011 * PGM wrappers.
2012 */
2013 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
2014 if (idCpu == NIL_VMCPUID)
2015 return VERR_INVALID_CPU_ID;
2016 rc = PGMR0PhysAllocateHandyPages(pGVM, pVM, idCpu);
2017 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2018 break;
2019
2020 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
2021 if (idCpu == NIL_VMCPUID)
2022 return VERR_INVALID_CPU_ID;
2023 rc = PGMR0PhysFlushHandyPages(pGVM, pVM, idCpu);
2024 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2025 break;
2026
2027 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
2028 if (idCpu == NIL_VMCPUID)
2029 return VERR_INVALID_CPU_ID;
2030 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, pVM, idCpu);
2031 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2032 break;
2033
2034 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
2035 if (idCpu != 0)
2036 return VERR_INVALID_CPU_ID;
2037 rc = PGMR0PhysSetupIoMmu(pGVM, pVM);
2038 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2039 break;
2040
2041 /*
2042 * GMM wrappers.
2043 */
2044 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2045 if (u64Arg)
2046 return VERR_INVALID_PARAMETER;
2047 rc = GMMR0InitialReservationReq(pGVM, pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
2048 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2049 break;
2050
2051 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2052 if (u64Arg)
2053 return VERR_INVALID_PARAMETER;
2054 rc = GMMR0UpdateReservationReq(pGVM, pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
2055 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2056 break;
2057
2058 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2059 if (u64Arg)
2060 return VERR_INVALID_PARAMETER;
2061 rc = GMMR0AllocatePagesReq(pGVM, pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
2062 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2063 break;
2064
2065 case VMMR0_DO_GMM_FREE_PAGES:
2066 if (u64Arg)
2067 return VERR_INVALID_PARAMETER;
2068 rc = GMMR0FreePagesReq(pGVM, pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
2069 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2070 break;
2071
2072 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
2073 if (u64Arg)
2074 return VERR_INVALID_PARAMETER;
2075 rc = GMMR0FreeLargePageReq(pGVM, pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
2076 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2077 break;
2078
2079 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
2080 if (u64Arg)
2081 return VERR_INVALID_PARAMETER;
2082 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
2083 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2084 break;
2085
2086 case VMMR0_DO_GMM_QUERY_MEM_STATS:
2087 if (idCpu == NIL_VMCPUID)
2088 return VERR_INVALID_CPU_ID;
2089 if (u64Arg)
2090 return VERR_INVALID_PARAMETER;
2091 rc = GMMR0QueryMemoryStatsReq(pGVM, pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
2092 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2093 break;
2094
2095 case VMMR0_DO_GMM_BALLOONED_PAGES:
2096 if (u64Arg)
2097 return VERR_INVALID_PARAMETER;
2098 rc = GMMR0BalloonedPagesReq(pGVM, pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
2099 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2100 break;
2101
2102 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
2103 if (u64Arg)
2104 return VERR_INVALID_PARAMETER;
2105 rc = GMMR0MapUnmapChunkReq(pGVM, pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
2106 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2107 break;
2108
2109 case VMMR0_DO_GMM_SEED_CHUNK:
2110 if (pReqHdr)
2111 return VERR_INVALID_PARAMETER;
2112 rc = GMMR0SeedChunk(pGVM, pVM, idCpu, (RTR3PTR)u64Arg);
2113 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2114 break;
2115
2116 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
2117 if (idCpu == NIL_VMCPUID)
2118 return VERR_INVALID_CPU_ID;
2119 if (u64Arg)
2120 return VERR_INVALID_PARAMETER;
2121 rc = GMMR0RegisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
2122 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2123 break;
2124
2125 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
2126 if (idCpu == NIL_VMCPUID)
2127 return VERR_INVALID_CPU_ID;
2128 if (u64Arg)
2129 return VERR_INVALID_PARAMETER;
2130 rc = GMMR0UnregisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
2131 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2132 break;
2133
2134 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
2135 if (idCpu == NIL_VMCPUID)
2136 return VERR_INVALID_CPU_ID;
2137 if ( u64Arg
2138 || pReqHdr)
2139 return VERR_INVALID_PARAMETER;
2140 rc = GMMR0ResetSharedModules(pGVM, pVM, idCpu);
2141 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2142 break;
2143
2144#ifdef VBOX_WITH_PAGE_SHARING
2145 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
2146 {
2147 if (idCpu == NIL_VMCPUID)
2148 return VERR_INVALID_CPU_ID;
2149 if ( u64Arg
2150 || pReqHdr)
2151 return VERR_INVALID_PARAMETER;
2152 rc = GMMR0CheckSharedModules(pGVM, pVM, idCpu);
2153 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2154 break;
2155 }
2156#endif
2157
2158#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
2159 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
2160 if (u64Arg)
2161 return VERR_INVALID_PARAMETER;
2162 rc = GMMR0FindDuplicatePageReq(pGVM, pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
2163 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2164 break;
2165#endif
2166
2167 case VMMR0_DO_GMM_QUERY_STATISTICS:
2168 if (u64Arg)
2169 return VERR_INVALID_PARAMETER;
2170 rc = GMMR0QueryStatisticsReq(pGVM, pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
2171 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2172 break;
2173
2174 case VMMR0_DO_GMM_RESET_STATISTICS:
2175 if (u64Arg)
2176 return VERR_INVALID_PARAMETER;
2177 rc = GMMR0ResetStatisticsReq(pGVM, pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
2178 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2179 break;
2180
2181 /*
2182 * A quick GCFGM mock-up.
2183 */
2184 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
2185 case VMMR0_DO_GCFGM_SET_VALUE:
2186 case VMMR0_DO_GCFGM_QUERY_VALUE:
2187 {
2188 if (pGVM || pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2189 return VERR_INVALID_PARAMETER;
2190 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
2191 if (pReq->Hdr.cbReq != sizeof(*pReq))
2192 return VERR_INVALID_PARAMETER;
2193 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
2194 {
2195 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2196 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2197 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2198 }
2199 else
2200 {
2201 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2202 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2203 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2204 }
2205 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2206 break;
2207 }
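        /*
         * For illustration, a hedged ring-3 sketch of issuing a GCFGM query using
         * only the request fields referenced above. The SUPR3CallVMMR0Ex wrapper
         * and the available value names are assumptions outside this file:
         *
         *     GCFGMVALUEREQ Req;
         *     Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
         *     Req.Hdr.cbReq    = sizeof(Req);
         *     Req.pSession     = pSession;
         *     Req.u64Value     = 0;
         *     RTStrCopy(Req.szName, sizeof(Req.szName), "<GVMM value name>");
         *     int rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID,
         *                               VMMR0_DO_GCFGM_QUERY_VALUE, 0, &Req.Hdr);
         */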
2208
2209 /*
2210 * PDM Wrappers.
2211 */
2212 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2213 {
2214 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2215 return VERR_INVALID_PARAMETER;
2216 rc = PDMR0DriverCallReqHandler(pGVM, pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2217 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2218 break;
2219 }
2220
2221 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
2222 {
2223 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2224 return VERR_INVALID_PARAMETER;
2225 rc = PDMR0DeviceCallReqHandler(pGVM, pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
2226 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2227 break;
2228 }
2229
2230 /*
2231 * Requests to the internal networking service.
2232 */
2233 case VMMR0_DO_INTNET_OPEN:
2234 {
2235 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2236 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2237 return VERR_INVALID_PARAMETER;
2238 rc = IntNetR0OpenReq(pSession, pReq);
2239 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2240 break;
2241 }
2242
2243 case VMMR0_DO_INTNET_IF_CLOSE:
2244 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2245 return VERR_INVALID_PARAMETER;
2246 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2247 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2248 break;
2249
2250
2251 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2252 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2253 return VERR_INVALID_PARAMETER;
2254 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2255 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2256 break;
2257
2258 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2259 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2260 return VERR_INVALID_PARAMETER;
2261 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2262 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2263 break;
2264
2265 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2266 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2267 return VERR_INVALID_PARAMETER;
2268 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2269 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2270 break;
2271
2272 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2273 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2274 return VERR_INVALID_PARAMETER;
2275 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2276 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2277 break;
2278
2279 case VMMR0_DO_INTNET_IF_SEND:
2280 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2281 return VERR_INVALID_PARAMETER;
2282 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2283 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2284 break;
2285
2286 case VMMR0_DO_INTNET_IF_WAIT:
2287 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2288 return VERR_INVALID_PARAMETER;
2289 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2290 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2291 break;
2292
2293 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2294 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2295 return VERR_INVALID_PARAMETER;
2296 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2297 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2298 break;
2299
2300#ifdef VBOX_WITH_PCI_PASSTHROUGH
2301 /*
2302 * Requests to host PCI driver service.
2303 */
2304 case VMMR0_DO_PCIRAW_REQ:
2305 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2306 return VERR_INVALID_PARAMETER;
2307 rc = PciRawR0ProcessReq(pGVM, pVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2308 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2309 break;
2310#endif
2311
2312 /*
2313 * NEM requests.
2314 */
2315#ifdef VBOX_WITH_NEM_R0
2316# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2317 case VMMR0_DO_NEM_INIT_VM:
2318 if (u64Arg || pReqHdr || idCpu != 0)
2319 return VERR_INVALID_PARAMETER;
2320 rc = NEMR0InitVM(pGVM, pVM);
2321 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2322 break;
2323
2324 case VMMR0_DO_NEM_INIT_VM_PART_2:
2325 if (u64Arg || pReqHdr || idCpu != 0)
2326 return VERR_INVALID_PARAMETER;
2327 rc = NEMR0InitVMPart2(pGVM, pVM);
2328 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2329 break;
2330
2331 case VMMR0_DO_NEM_MAP_PAGES:
2332 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2333 return VERR_INVALID_PARAMETER;
2334 rc = NEMR0MapPages(pGVM, pVM, idCpu);
2335 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2336 break;
2337
2338 case VMMR0_DO_NEM_UNMAP_PAGES:
2339 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2340 return VERR_INVALID_PARAMETER;
2341 rc = NEMR0UnmapPages(pGVM, pVM, idCpu);
2342 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2343 break;
2344
2345 case VMMR0_DO_NEM_EXPORT_STATE:
2346 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2347 return VERR_INVALID_PARAMETER;
2348 rc = NEMR0ExportState(pGVM, pVM, idCpu);
2349 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2350 break;
2351
2352 case VMMR0_DO_NEM_IMPORT_STATE:
2353 if (pReqHdr || idCpu == NIL_VMCPUID)
2354 return VERR_INVALID_PARAMETER;
2355 rc = NEMR0ImportState(pGVM, pVM, idCpu, u64Arg);
2356 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2357 break;
2358
2359 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2360 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2361 return VERR_INVALID_PARAMETER;
2362 rc = NEMR0QueryCpuTick(pGVM, pVM, idCpu);
2363 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2364 break;
2365
2366 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2367 if (pReqHdr || idCpu == NIL_VMCPUID)
2368 return VERR_INVALID_PARAMETER;
2369 rc = NEMR0ResumeCpuTickOnAll(pGVM, pVM, idCpu, u64Arg);
2370 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2371 break;
2372
2373 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2374 if (u64Arg || pReqHdr)
2375 return VERR_INVALID_PARAMETER;
2376 rc = NEMR0UpdateStatistics(pGVM, pVM, idCpu);
2377 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2378 break;
2379
2380# if 1 && defined(DEBUG_bird)
2381 case VMMR0_DO_NEM_EXPERIMENT:
2382 if (pReqHdr)
2383 return VERR_INVALID_PARAMETER;
2384 rc = NEMR0DoExperiment(pGVM, pVM, idCpu, u64Arg);
2385 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2386 break;
2387# endif
2388# endif
2389#endif
2390
2391 /*
2392 * For profiling.
2393 */
2394 case VMMR0_DO_NOP:
2395 case VMMR0_DO_SLOW_NOP:
2396 return VINF_SUCCESS;
2397
2398 /*
2399 * For testing Ring-0 APIs invoked in this environment.
2400 */
2401 case VMMR0_DO_TESTS:
2402 /** @todo make new test */
2403 return VINF_SUCCESS;
2404
2405
2406#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
2407 case VMMR0_DO_TEST_SWITCHER3264:
2408 if (idCpu == NIL_VMCPUID)
2409 return VERR_INVALID_CPU_ID;
2410 rc = HMR0TestSwitcher3264(pVM);
2411 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2412 break;
2413#endif
2414 default:
2415 /*
2416 * We're returning VERR_NOT_SUPPORTED here so we've got something else
2417 * than -1 which the interrupt gate glue code might return.
2418 */
2419 Log(("operation %#x is not supported\n", enmOperation));
2420 return VERR_NOT_SUPPORTED;
2421 }
2422 return rc;
2423}
2424
2425
2426/**
2427 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2428 */
2429typedef struct VMMR0ENTRYEXARGS
2430{
2431 PGVM pGVM;
2432 PVM pVM;
2433 VMCPUID idCpu;
2434 VMMR0OPERATION enmOperation;
2435 PSUPVMMR0REQHDR pReq;
2436 uint64_t u64Arg;
2437 PSUPDRVSESSION pSession;
2438} VMMR0ENTRYEXARGS;
2439/** Pointer to a vmmR0EntryExWrapper argument package. */
2440typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
2441
2442/**
2443 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2444 *
2445 * @returns VBox status code.
2446 * @param pvArgs The argument package
2447 */
2448static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2449{
2450 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2451 ((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
2452 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2453 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2454 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2455 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2456 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2457}
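/*
 * This wrapper exists so the whole request can be squeezed through the single
 * pointer argument taken by the setjmp callback: VMMR0EntryEx below packs the
 * arguments into a VMMR0ENTRYEXARGS on its stack, hands it to
 * vmmR0CallRing3SetJmpEx, and this function unpacks it again on the other side.
 */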
2458
2459
2460/**
2461 * The Ring 0 entry point, called by the support library (SUP).
2462 *
2463 * @returns VBox status code.
2464 * @param pGVM The global (ring-0) VM structure.
2465 * @param pVM The cross context VM structure.
2466 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2467 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2468 * @param enmOperation Which operation to execute.
2469 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2470 * @param u64Arg Some simple constant argument.
2471 * @param pSession The session of the caller.
2472 * @remarks Assume called with interrupts _enabled_.
2473 */
2474VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2475 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2476{
2477 /*
2478 * Requests that should only happen on the EMT thread will be
2479 * wrapped in a setjmp so we can assert without causing trouble.
2480 */
2481 if ( pVM != NULL
2482 && pGVM != NULL
2483 && idCpu < pGVM->cCpus
2484 && pVM->pVMR0 != NULL)
2485 {
2486 switch (enmOperation)
2487 {
2488 /* These might/will be called before VMMR3Init. */
2489 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2490 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2491 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2492 case VMMR0_DO_GMM_FREE_PAGES:
2493 case VMMR0_DO_GMM_BALLOONED_PAGES:
2494 /* On the Mac we might not have a valid jmp buf, so check these as well. */
2495 case VMMR0_DO_VMMR0_INIT:
2496 case VMMR0_DO_VMMR0_TERM:
2497 {
2498 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2499 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2500 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2501 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2502 && pVCpu->hNativeThreadR0 == hNativeThread))
2503 {
2504 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2505 break;
2506
2507 /** @todo validate this EMT claim... GVM knows. */
2508 VMMR0ENTRYEXARGS Args;
2509 Args.pGVM = pGVM;
2510 Args.pVM = pVM;
2511 Args.idCpu = idCpu;
2512 Args.enmOperation = enmOperation;
2513 Args.pReq = pReq;
2514 Args.u64Arg = u64Arg;
2515 Args.pSession = pSession;
2516 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2517 }
2518 return VERR_VM_THREAD_NOT_EMT;
2519 }
2520
2521 default:
2522 break;
2523 }
2524 }
2525 return vmmR0EntryExWorker(pGVM, pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2526}
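/*
 * In short: requests that must come from the EMT and that may assert are
 * re-entered through vmmR0CallRing3SetJmpEx so a failing assertion can
 * long-jump back to ring-3, while everything else (including anything issued
 * before the jump buffer stack is allocated) goes straight to
 * vmmR0EntryExWorker.
 */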
2527
2528
2529/**
2530 * Checks whether we've armed the ring-0 long jump machinery.
2531 *
2532 * @returns @c true / @c false
2533 * @param pVCpu The cross context virtual CPU structure.
2534 * @thread EMT
2535 * @sa VMMIsLongJumpArmed
2536 */
2537VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
2538{
2539#ifdef RT_ARCH_X86
2540 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2541 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2542#else
2543 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2544 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2545#endif
2546}
2547
2548
2549/**
2550 * Checks whether we've done a ring-3 long jump.
2551 *
2552 * @returns @c true / @c false
2553 * @param pVCpu The cross context virtual CPU structure.
2554 * @thread EMT
2555 */
2556VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
2557{
2558 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2559}
2560
2561
2562/**
2563 * Internal R0 logger worker: Flush logger.
2564 *
2565 * @param pLogger The logger instance to flush.
2566 * @remark This function must be exported!
2567 */
2568VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2569{
2570#ifdef LOG_ENABLED
2571 /*
2572 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2573 * (This code is a bit paranoid.)
2574 */
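    /* The RTLOGGER instance is embedded as the 'Logger' member of VMMR0LOGGER,
       so subtracting its offset recovers the wrapper (a container-of calculation). */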
2575 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_UOFFSETOF(VMMR0LOGGER, Logger));
2576 if ( !VALID_PTR(pR0Logger)
2577 || !VALID_PTR(pR0Logger + 1)
2578 || pLogger->u32Magic != RTLOGGER_MAGIC)
2579 {
2580# ifdef DEBUG
2581 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2582# endif
2583 return;
2584 }
2585 if (pR0Logger->fFlushingDisabled)
2586 return; /* quietly */
2587
2588 PVM pVM = pR0Logger->pVM;
2589 if ( !VALID_PTR(pVM)
2590 || pVM->pVMR0 != pVM)
2591 {
2592# ifdef DEBUG
2593 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
2594# endif
2595 return;
2596 }
2597
2598 PVMCPU pVCpu = VMMGetCpu(pVM);
2599 if (pVCpu)
2600 {
2601 /*
2602 * Check that the jump buffer is armed.
2603 */
2604# ifdef RT_ARCH_X86
2605 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2606 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2607# else
2608 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2609 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2610# endif
2611 {
2612# ifdef DEBUG
2613 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2614# endif
2615 return;
2616 }
2617 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2618 }
2619# ifdef DEBUG
2620 else
2621 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2622# endif
2623#else
2624 NOREF(pLogger);
2625#endif /* LOG_ENABLED */
2626}
2627
2628#ifdef LOG_ENABLED
2629
2630/**
2631 * Disables flushing of the ring-0 debug log.
2632 *
2633 * @param pVCpu The cross context virtual CPU structure.
2634 */
2635VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
2636{
2637 if (pVCpu->vmm.s.pR0LoggerR0)
2638 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2639 if (pVCpu->vmm.s.pR0RelLoggerR0)
2640 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = true;
2641}
2642
2643
2644/**
2645 * Enables flushing of the ring-0 debug log.
2646 *
2647 * @param pVCpu The cross context virtual CPU structure.
2648 */
2649VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
2650{
2651 if (pVCpu->vmm.s.pR0LoggerR0)
2652 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2653 if (pVCpu->vmm.s.pR0RelLoggerR0)
2654 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = false;
2655}
2656
2657
2658/**
2659 * Checks if log flushing is disabled or not.
2660 *
2661 * @param pVCpu The cross context virtual CPU structure.
2662 */
2663VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
2664{
2665 if (pVCpu->vmm.s.pR0LoggerR0)
2666 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2667 if (pVCpu->vmm.s.pR0RelLoggerR0)
2668 return pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled;
2669 return true;
2670}
2671
2672#endif /* LOG_ENABLED */
2673
2674/**
2675 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
2676 */
2677DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
2678{
2679 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
2680 if (pGVCpu)
2681 {
2682 PVMCPU pVCpu = pGVCpu->pVCpu;
2683 if (RT_VALID_PTR(pVCpu))
2684 {
2685 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR0;
2686 if (RT_VALID_PTR(pVmmLogger))
2687 {
2688 if ( pVmmLogger->fCreated
2689 && pVmmLogger->pVM == pGVCpu->pVM)
2690 {
2691 if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
2692 return NULL;
2693 uint16_t const fFlags = RT_LO_U16(fFlagsAndGroup);
2694 uint16_t const iGroup = RT_HI_U16(fFlagsAndGroup);
2695 if ( iGroup != UINT16_MAX
2696 && ( ( pVmmLogger->Logger.afGroups[iGroup < pVmmLogger->Logger.cGroups ? iGroup : 0]
2697 & (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED))
2698 != (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED)))
2699 return NULL;
2700 return &pVmmLogger->Logger;
2701 }
2702 }
2703 }
2704 }
2705 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
2706}
2707
2708
2709/**
2710 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2711 *
2712 * @returns true if the breakpoint should be hit, false if it should be ignored.
2713 */
2714DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2715{
2716#if 0
2717 return true;
2718#else
2719 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2720 if (pVM)
2721 {
2722 PVMCPU pVCpu = VMMGetCpu(pVM);
2723
2724 if (pVCpu)
2725 {
2726#ifdef RT_ARCH_X86
2727 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2728 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2729#else
2730 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2731 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2732#endif
2733 {
2734 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2735 return RT_FAILURE_NP(rc);
2736 }
2737 }
2738 }
2739#ifdef RT_OS_LINUX
2740 return true;
2741#else
2742 return false;
2743#endif
2744#endif
2745}
2746
2747
2748/**
2749 * Override this so we can push it up to ring-3.
2750 *
2751 * @param pszExpr Expression. Can be NULL.
2752 * @param uLine Location line number.
2753 * @param pszFile Location file name.
2754 * @param pszFunction Location function name.
2755 */
2756DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2757{
2758 /*
2759 * To the log.
2760 */
2761 LogAlways(("\n!!R0-Assertion Failed!!\n"
2762 "Expression: %s\n"
2763 "Location : %s(%d) %s\n",
2764 pszExpr, pszFile, uLine, pszFunction));
2765
2766 /*
2767 * To the global VMM buffer.
2768 */
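    /* The expression text is clipped to roughly three quarters of the buffer
       (the "%.*s" precision below) so the location line always fits as well. */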
2769 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2770 if (pVM)
2771 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2772 "\n!!R0-Assertion Failed!!\n"
2773 "Expression: %.*s\n"
2774 "Location : %s(%d) %s\n",
2775 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2776 pszFile, uLine, pszFunction);
2777
2778 /*
2779 * Continue the normal way.
2780 */
2781 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2782}
2783
2784
2785/**
2786 * Callback for RTLogFormatV which writes to the ring-3 log port.
2787 * See PFNLOGOUTPUT() for details.
2788 */
2789static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2790{
2791 for (size_t i = 0; i < cbChars; i++)
2792 {
2793 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2794 }
2795
2796 NOREF(pv);
2797 return cbChars;
2798}
2799
2800
2801/**
2802 * Override this so we can push it up to ring-3.
2803 *
2804 * @param pszFormat The format string.
2805 * @param va Arguments.
2806 */
2807DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2808{
2809 va_list vaCopy;
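    /* Each formatter below consumes its argument list, so every consumer gets
       its own va_copy while the original 'va' is saved for the final RTAssertMsg2V. */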
2810
2811 /*
2812 * Push the message to the loggers.
2813 */
2814 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2815 if (pLog)
2816 {
2817 va_copy(vaCopy, va);
2818 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2819 va_end(vaCopy);
2820 }
2821 pLog = RTLogRelGetDefaultInstance();
2822 if (pLog)
2823 {
2824 va_copy(vaCopy, va);
2825 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2826 va_end(vaCopy);
2827 }
2828
2829 /*
2830 * Push it to the global VMM buffer.
2831 */
2832 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2833 if (pVM)
2834 {
2835 va_copy(vaCopy, va);
2836 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2837 va_end(vaCopy);
2838 }
2839
2840 /*
2841 * Continue the normal way.
2842 */
2843 RTAssertMsg2V(pszFormat, va);
2844}
2845