VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@90724

Last change on this file since 90724 was 90380, checked in by vboxsync, 4 years ago

VMM: Moved idHostCpu and iHostCpuSet from VMCPU to GVMCPU, removing the VMMR0PERVCPU copies too. bugref:6695

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 108.3 KB
1/* $Id: VMMR0.cpp 90380 2021-07-28 21:38:23Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_NEM_R0
31# include <VBox/vmm/nem.h>
32#endif
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/stam.h>
35#include <VBox/vmm/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvm.h>
39#ifdef VBOX_WITH_PCI_PASSTHROUGH
40# include <VBox/vmm/pdmpci.h>
41#endif
42#include <VBox/vmm/apic.h>
43
44#include <VBox/vmm/gvmm.h>
45#include <VBox/vmm/gmm.h>
46#include <VBox/vmm/gim.h>
47#include <VBox/intnet.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/param.h>
50#include <VBox/err.h>
51#include <VBox/version.h>
52#include <VBox/log.h>
53
54#include <iprt/asm-amd64-x86.h>
55#include <iprt/assert.h>
56#include <iprt/crc.h>
57#include <iprt/mp.h>
58#include <iprt/once.h>
59#include <iprt/stdarg.h>
60#include <iprt/string.h>
61#include <iprt/thread.h>
62#include <iprt/timer.h>
63#include <iprt/time.h>
64
65#include "dtrace/VBoxVMM.h"
66
67
68#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
69# pragma intrinsic(_AddressOfReturnAddress)
70#endif
71
72#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
73# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
74#endif
75
76
77
78/*********************************************************************************************************************************
79* Defined Constants And Macros *
80*********************************************************************************************************************************/
81/** @def VMM_CHECK_SMAP_SETUP
82 * SMAP check setup. */
83/** @def VMM_CHECK_SMAP_CHECK
84 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
85 * it will be logged and @a a_BadExpr is executed. */
86/** @def VMM_CHECK_SMAP_CHECK2
87 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
88 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
89 * executed. */
90#if (defined(VBOX_STRICT) || 1) && !defined(VBOX_WITH_RAM_IN_KERNEL)
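/* Note: the "|| 1" above currently forces the SMAP checks on even in non-strict
   builds; they are only compiled out when VBOX_WITH_RAM_IN_KERNEL is defined. */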
91# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
92# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
93 do { \
94 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
95 { \
96 RTCCUINTREG fEflCheck = ASMGetFlags(); \
97 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
98 { /* likely */ } \
99 else \
100 { \
101 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
102 a_BadExpr; \
103 } \
104 } \
105 } while (0)
106# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
107 do { \
108 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
109 { \
110 RTCCUINTREG fEflCheck = ASMGetFlags(); \
111 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
112 { /* likely */ } \
113 else if (a_pGVM) \
114 { \
115 SUPR0BadContext((a_pGVM)->pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
116 RTStrPrintf((a_pGVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pGVM)->vmm.s.szRing0AssertMsg1), \
117 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
118 a_BadExpr; \
119 } \
120 else \
121 { \
122 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
123 a_BadExpr; \
124 } \
125 } \
126 } while (0)
127#else
128# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
129# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
130# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures)
131#endif
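/* Illustrative usage sketch (mirrors how ModuleInit and vmmR0InitVM below use these
   macros): VMM_CHECK_SMAP_SETUP() declares the fKernelFeatures local that the CHECK
   macros reference, so it must appear first in any function that uses them, e.g.:

       VMM_CHECK_SMAP_SETUP();
       VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);     // log + bail if EFLAGS.AC is clear
       ...
       VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);  // also records the assertion text
 */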
132
133
134/*********************************************************************************************************************************
135* Internal Functions *
136*********************************************************************************************************************************/
137RT_C_DECLS_BEGIN
138#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
139extern uint64_t __udivdi3(uint64_t, uint64_t);
140extern uint64_t __umoddi3(uint64_t, uint64_t);
141#endif
142RT_C_DECLS_END
143
144
145/*********************************************************************************************************************************
146* Global Variables *
147*********************************************************************************************************************************/
148/** Drag in necessary library bits.
149 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
150struct CLANG11WEIRDNOTHROW { PFNRT pfn; } g_VMMR0Deps[] =
151{
152 { (PFNRT)RTCrc32 },
153 { (PFNRT)RTOnce },
154#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
155 { (PFNRT)__udivdi3 },
156 { (PFNRT)__umoddi3 },
157#endif
158 { NULL }
159};
160
161#ifdef RT_OS_SOLARIS
162/* Dependency information for the native solaris loader. */
163extern "C" { char _depends_on[] = "vboxdrv"; }
164#endif
165
166
167/**
168 * Initialize the module.
169 * This is called when we're first loaded.
170 *
171 * @returns 0 on success.
172 * @returns VBox status code on failure.
173 * @param hMod Image handle for use in APIs.
174 */
175DECLEXPORT(int) ModuleInit(void *hMod)
176{
177 VMM_CHECK_SMAP_SETUP();
178 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
179
180#ifdef VBOX_WITH_DTRACE_R0
181 /*
182 * The first thing to do is register the static tracepoints.
183 * (Deregistration is automatic.)
184 */
185 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
186 if (RT_FAILURE(rc2))
187 return rc2;
188#endif
189 LogFlow(("ModuleInit:\n"));
190
191#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
192 /*
193 * Display the CMOS debug code.
194 */
195 ASMOutU8(0x72, 0x03);
196 uint8_t bDebugCode = ASMInU8(0x73);
197 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
198 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
199#endif
200
201 /*
202 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
203 */
204 int rc = vmmInitFormatTypes();
205 if (RT_SUCCESS(rc))
206 {
207 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
208 rc = GVMMR0Init();
209 if (RT_SUCCESS(rc))
210 {
211 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
212 rc = GMMR0Init();
213 if (RT_SUCCESS(rc))
214 {
215 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
216 rc = HMR0Init();
217 if (RT_SUCCESS(rc))
218 {
219 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
220
221 PDMR0Init(hMod);
222 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
223
224 rc = PGMRegisterStringFormatTypes();
225 if (RT_SUCCESS(rc))
226 {
227 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
228#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
229 rc = PGMR0DynMapInit();
230#endif
231 if (RT_SUCCESS(rc))
232 {
233 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
234 rc = IntNetR0Init();
235 if (RT_SUCCESS(rc))
236 {
237#ifdef VBOX_WITH_PCI_PASSTHROUGH
238 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
239 rc = PciRawR0Init();
240#endif
241 if (RT_SUCCESS(rc))
242 {
243 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
244 rc = CPUMR0ModuleInit();
245 if (RT_SUCCESS(rc))
246 {
247#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
248 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
249 rc = vmmR0TripleFaultHackInit();
250 if (RT_SUCCESS(rc))
251#endif
252 {
253 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
254 if (RT_SUCCESS(rc))
255 {
256 LogFlow(("ModuleInit: returns success\n"));
257 return VINF_SUCCESS;
258 }
259 }
260
261 /*
262 * Bail out.
263 */
264#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
265 vmmR0TripleFaultHackTerm();
266#endif
267 }
268 else
269 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
270#ifdef VBOX_WITH_PCI_PASSTHROUGH
271 PciRawR0Term();
272#endif
273 }
274 else
275 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
276 IntNetR0Term();
277 }
278 else
279 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
280#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
281 PGMR0DynMapTerm();
282#endif
283 }
284 else
285 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
286 PGMDeregisterStringFormatTypes();
287 }
288 else
289 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
290 HMR0Term();
291 }
292 else
293 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
294 GMMR0Term();
295 }
296 else
297 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
298 GVMMR0Term();
299 }
300 else
301 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
302 vmmTermFormatTypes();
303 }
304 else
305 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
306
307 LogFlow(("ModuleInit: failed %Rrc\n", rc));
308 return rc;
309}
310
311
312/**
313 * Terminate the module.
314 * This is called when we're finally unloaded.
315 *
316 * @param hMod Image handle for use in APIs.
317 */
318DECLEXPORT(void) ModuleTerm(void *hMod)
319{
320 NOREF(hMod);
321 LogFlow(("ModuleTerm:\n"));
322
323 /*
324 * Terminate the CPUM module (Local APIC cleanup).
325 */
326 CPUMR0ModuleTerm();
327
328 /*
329 * Terminate the internal network service.
330 */
331 IntNetR0Term();
332
333 /*
334 * PGM (Darwin), HM and PciRaw global cleanup.
335 */
336#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
337 PGMR0DynMapTerm();
338#endif
339#ifdef VBOX_WITH_PCI_PASSTHROUGH
340 PciRawR0Term();
341#endif
342 PGMDeregisterStringFormatTypes();
343 HMR0Term();
344#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
345 vmmR0TripleFaultHackTerm();
346#endif
347
348 /*
349 * Destroy the GMM and GVMM instances.
350 */
351 GMMR0Term();
352 GVMMR0Term();
353
354 vmmTermFormatTypes();
355
356 LogFlow(("ModuleTerm: returns\n"));
357}
358
359
360/**
361 * Initializes VMM specific members when the GVM structure is created.
362 *
363 * @param pGVM The global (ring-0) VM structure.
364 */
365VMMR0_INT_DECL(void) VMMR0InitPerVMData(PGVM pGVM)
366{
367 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
368 {
369 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
370 Assert(pGVCpu->idHostCpu == NIL_RTCPUID);
371 Assert(pGVCpu->iHostCpuSet == UINT32_MAX);
372 pGVCpu->vmmr0.s.fInHmContext = false;
373 pGVCpu->vmmr0.s.pPreemptState = NULL;
374 pGVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
375 }
376}
377
378
379/**
380 * Initiates the R0 driver for a particular VM instance.
381 *
382 * @returns VBox status code.
383 *
384 * @param pGVM The global (ring-0) VM structure.
385 * @param uSvnRev The SVN revision of the ring-3 part.
386 * @param uBuildType Build type indicator.
387 * @thread EMT(0)
388 */
389static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
390{
391 VMM_CHECK_SMAP_SETUP();
392 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
393
394 /*
395 * Match the SVN revisions and build type.
396 */
397 if (uSvnRev != VMMGetSvnRev())
398 {
399 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
400 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
401 return VERR_VMM_R0_VERSION_MISMATCH;
402 }
403 if (uBuildType != vmmGetBuildType())
404 {
405 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
406 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
407 return VERR_VMM_R0_VERSION_MISMATCH;
408 }
409
410 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
411 if (RT_FAILURE(rc))
412 return rc;
413
414#ifdef LOG_ENABLED
415 /*
416 * Register the EMT R0 logger instance for VCPU 0.
417 */
418 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
419
420 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
421 if (pR0Logger)
422 {
423# if 0 /* testing of the logger. */
424 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
425 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
426 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
427 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
428
429 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
430 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
431 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
432 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
433
434 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
435 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
436 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
437 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
438
439 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
440 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
441 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
442 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
443 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
444 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
445
446 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
447 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
448
449 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
450 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
451 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
452# endif
453 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pGVM->pSession));
454 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
455 pR0Logger->fRegistered = true;
456 }
457#endif /* LOG_ENABLED */
458
459 /*
460 * Check if the host supports high resolution timers or not.
461 */
462 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
463 && !RTTimerCanDoHighResolution())
464 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
465
466 /*
467 * Initialize the per VM data for GVMM and GMM.
468 */
469 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
470 rc = GVMMR0InitVM(pGVM);
471 if (RT_SUCCESS(rc))
472 {
473 /*
474 * Init HM, CPUM and PGM (Darwin only).
475 */
476 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
477 rc = HMR0InitVM(pGVM);
478 if (RT_SUCCESS(rc))
479 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */
480 if (RT_SUCCESS(rc))
481 {
482 rc = CPUMR0InitVM(pGVM);
483 if (RT_SUCCESS(rc))
484 {
485 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
486 rc = PGMR0InitVM(pGVM);
487 if (RT_SUCCESS(rc))
488 {
489 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
490 rc = EMR0InitVM(pGVM);
491 if (RT_SUCCESS(rc))
492 {
493 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
494#ifdef VBOX_WITH_PCI_PASSTHROUGH
495 rc = PciRawR0InitVM(pGVM);
496#endif
497 if (RT_SUCCESS(rc))
498 {
499 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
500 rc = GIMR0InitVM(pGVM);
501 if (RT_SUCCESS(rc))
502 {
503 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);
504 if (RT_SUCCESS(rc))
505 {
506 GVMMR0DoneInitVM(pGVM);
507
508 /*
509 * Collect a bit of info for the VM release log.
510 */
511 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
512 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
513
514 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
515 return rc;
516 }
517
518 /* bail out. */
519 GIMR0TermVM(pGVM);
520 }
521#ifdef VBOX_WITH_PCI_PASSTHROUGH
522 PciRawR0TermVM(pGVM);
523#endif
524 }
525 }
526 }
527 }
528 HMR0TermVM(pGVM);
529 }
530 }
531
532 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
533 return rc;
534}
535
536
537/**
538 * Does EMT specific VM initialization.
539 *
540 * @returns VBox status code.
541 * @param pGVM The ring-0 VM structure.
542 * @param idCpu The EMT that's calling.
543 */
544static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
545{
546 /* Paranoia (caller checked these already). */
547 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
548 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
549
550#ifdef LOG_ENABLED
551 /*
552 * Registration of ring 0 loggers.
553 */
554 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
555 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
556 if ( pR0Logger
557 && !pR0Logger->fRegistered)
558 {
559 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
560 pR0Logger->fRegistered = true;
561 }
562#endif
563
564 return VINF_SUCCESS;
565}
566
567
568
569/**
570 * Terminates the R0 bits for a particular VM instance.
571 *
572 * This is normally called by ring-3 as part of the VM termination process, but
573 * may alternatively be called during the support driver session cleanup when
574 * the VM object is destroyed (see GVMM).
575 *
576 * @returns VBox status code.
577 *
578 * @param pGVM The global (ring-0) VM structure.
579 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
580 * thread.
581 * @thread EMT(0) or session clean up thread.
582 */
583VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
584{
585 /*
586 * Check EMT(0) claim if we're called from userland.
587 */
588 if (idCpu != NIL_VMCPUID)
589 {
590 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
591 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
592 if (RT_FAILURE(rc))
593 return rc;
594 }
595
596#ifdef VBOX_WITH_PCI_PASSTHROUGH
597 PciRawR0TermVM(pGVM);
598#endif
599
600 /*
601 * Tell GVMM what we're up to and check that we only do this once.
602 */
603 if (GVMMR0DoingTermVM(pGVM))
604 {
605 GIMR0TermVM(pGVM);
606
607 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
608 * here to make sure we don't leak any shared pages if we crash... */
609#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
610 PGMR0DynMapTermVM(pGVM);
611#endif
612 HMR0TermVM(pGVM);
613 }
614
615 /*
616 * Deregister the logger.
617 */
618 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
619 return VINF_SUCCESS;
620}
621
622
623/**
624 * An interrupt or unhalt force flag is set, deal with it.
625 *
626 * @returns VINF_SUCCESS (or VINF_EM_HALT).
627 * @param pVCpu The cross context virtual CPU structure.
628 * @param uMWait Result from EMMonitorWaitIsActive().
629 * @param enmInterruptibility Guest CPU interruptibility level.
630 */
631static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
632{
633 Assert(!TRPMHasTrap(pVCpu));
634 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
635 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
636
637 /*
638 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
639 */
640 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
641 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
642 {
643 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
644 {
645 uint8_t u8Interrupt = 0;
646 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
647 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
648 if (RT_SUCCESS(rc))
649 {
650 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
651
652 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
653 AssertRCSuccess(rc);
654 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
655 return rc;
656 }
657 }
658 }
659 /*
660 * SMI is not implemented yet, at least not here.
661 */
662 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
663 {
664 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #3\n", pVCpu->idCpu));
665 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
666 return VINF_EM_HALT;
667 }
668 /*
669 * NMI.
670 */
671 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
672 {
673 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
674 {
675 /** @todo later. */
676 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #2 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
677 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
678 return VINF_EM_HALT;
679 }
680 }
681 /*
682 * Nested-guest virtual interrupt.
683 */
684 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
685 {
686 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
687 {
688 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
689 * here before injecting the virtual interrupt. See emR3ForcedActions
690 * for details. */
691 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #1 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
692 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
693 return VINF_EM_HALT;
694 }
695 }
696
697 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
698 {
699 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
700 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (UNHALT)\n", pVCpu->idCpu));
701 return VINF_SUCCESS;
702 }
703 if (uMWait > 1)
704 {
705 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
706 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (uMWait=%u > 1)\n", pVCpu->idCpu, uMWait));
707 return VINF_SUCCESS;
708 }
709
710 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #0 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
711 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
712 return VINF_EM_HALT;
713}
714
715
716/**
717 * This does one round of vmR3HaltGlobal1Halt().
718 *
719 * The rationale here is that we'll reduce latency in interrupt situations if we
720 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
721 * MWAIT), but do one round of blocking here instead and hope the interrupt is
722 * raised in the meanwhile.
723 *
724 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
725 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
726 * ring-0 call (unless we're too close to a timer event). When the interrupt
727 * wakes us up, we'll return from ring-0 and EM will by instinct do a
728 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
729 * back to VMMR0EntryFast().
730 *
731 * @returns VINF_SUCCESS or VINF_EM_HALT.
732 * @param pGVM The ring-0 VM structure.
733 * @param pGVCpu The ring-0 virtual CPU structure.
734 *
735 * @todo r=bird: All the blocking/waiting and EMT management should move out of
736 * the VM module, probably to VMM. Then this would be more weird wrt
737 * parameters and statistics.
738 */
739static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
740{
741 /*
742 * Do spin stat historization.
743 */
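/* Every 256 halts the two counters below are rebiased towards whichever outcome has
   dominated recently (2 vs 0), so old history does not drown out current behaviour. */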
744 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
745 { /* likely */ }
746 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
747 {
748 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
749 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
750 }
751 else
752 {
753 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
754 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
755 }
756
757 /*
758 * Flags that make us go to ring-3.
759 */
760 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
761 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
762 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
763 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
764 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
765 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
766 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
767 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
768
769 /*
770 * Check preconditions.
771 */
772 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
773 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
774 if ( pGVCpu->vmm.s.fMayHaltInRing0
775 && !TRPMHasTrap(pGVCpu)
776 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
777 || uMWait > 1))
778 {
779 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
780 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
781 {
782 /*
783 * Interrupts pending already?
784 */
785 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
786 APICUpdatePendingInterrupts(pGVCpu);
787
788 /*
789 * Flags that wake us up from the halted state.
790 */
791 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
792 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
793
794 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
795 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
796 ASMNopPause();
797
798 /*
799 * Check out how long till the next timer event.
800 */
801 uint64_t u64Delta;
802 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
803
804 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
805 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
806 {
807 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
808 APICUpdatePendingInterrupts(pGVCpu);
809
810 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
811 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
812
813 /*
814 * Wait if there is enough time to the next timer event.
815 */
816 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
817 {
818 /* If there are few other CPU cores around, we will procrastinate a
819 little before going to sleep, hoping for some device raising an
820 interrupt or similar. Though, the best thing here would be to
821 dynamically adjust the spin count according to its usefulness or
822 something... */
823 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
824 && RTMpGetOnlineCount() >= 4)
825 {
826 /** @todo Figure out how we can skip this if it hasn't helped recently...
827 * @bugref{9172#c12} */
828 uint32_t cSpinLoops = 42;
829 while (cSpinLoops-- > 0)
830 {
831 ASMNopPause();
832 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
833 APICUpdatePendingInterrupts(pGVCpu);
834 ASMNopPause();
835 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
836 {
837 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
838 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
839 return VINF_EM_HALT;
840 }
841 ASMNopPause();
842 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
843 {
844 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
845 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
846 return VINF_EM_HALT;
847 }
848 ASMNopPause();
849 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
850 {
851 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
852 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
853 }
854 ASMNopPause();
855 }
856 }
857
858 /*
859 * We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
860 * knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here).
861 * After changing the state we must recheck the force flags of course.
862 */
863 if (VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
864 {
865 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
866 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
867 {
868 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
869 APICUpdatePendingInterrupts(pGVCpu);
870
871 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
872 {
873 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
874 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
875 }
876
877 /* Okay, block! */
878 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
879 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
880 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
881 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
882 Log10(("vmmR0DoHalt: CPU%d: halted %llu ns\n", pGVCpu->idCpu, cNsElapsedSchedHalt));
883
884 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
885 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
886 if ( rc == VINF_SUCCESS
887 || rc == VERR_INTERRUPTED)
888 {
889 /* Keep some stats like ring-3 does. */
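/* Classify the wake-up: more than 50 us past the requested GIP deadline counts as
   overslept, waking more than 50 us early counts as insomnia, otherwise on time. */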
890 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
891 if (cNsOverslept > 50000)
892 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
893 else if (cNsOverslept < -50000)
894 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
895 else
896 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
897
898 /*
899 * Recheck whether we can resume execution or have to go to ring-3.
900 */
901 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
902 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
903 {
904 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
905 APICUpdatePendingInterrupts(pGVCpu);
906 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
907 {
908 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
909 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
910 }
911 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostNoInt);
912 Log12(("vmmR0DoHalt: CPU%d post #2 - No pending interrupt\n", pGVCpu->idCpu));
913 }
914 else
915 {
916 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostPendingFF);
917 Log12(("vmmR0DoHalt: CPU%d post #1 - Pending FF\n", pGVCpu->idCpu));
918 }
919 }
920 else
921 {
922 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
923 Log12(("vmmR0DoHalt: CPU%d GVMMR0SchedHalt failed: %Rrc\n", pGVCpu->idCpu, rc));
924 }
925 }
926 else
927 {
928 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
929 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
930 Log12(("vmmR0DoHalt: CPU%d failed #5 - Pending FF\n", pGVCpu->idCpu));
931 }
932 }
933 else
934 {
935 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
936 Log12(("vmmR0DoHalt: CPU%d failed #4 - enmState=%d\n", pGVCpu->idCpu, VMCPU_GET_STATE(pGVCpu)));
937 }
938 }
939 else
940 {
941 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3SmallDelta);
942 Log12(("vmmR0DoHalt: CPU%d failed #3 - delta too small: %RU64\n", pGVCpu->idCpu, u64Delta));
943 }
944 }
945 else
946 {
947 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
948 Log12(("vmmR0DoHalt: CPU%d failed #2 - Pending FF\n", pGVCpu->idCpu));
949 }
950 }
951 else
952 {
953 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
954 Log12(("vmmR0DoHalt: CPU%d failed #1 - Pending FF\n", pGVCpu->idCpu));
955 }
956 }
957 else
958 {
959 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
960 Log12(("vmmR0DoHalt: CPU%d failed #0 - fMayHaltInRing0=%d TRPMHasTrap=%d enmInt=%d uMWait=%u\n",
961 pGVCpu->idCpu, pGVCpu->vmm.s.fMayHaltInRing0, TRPMHasTrap(pGVCpu), enmInterruptibility, uMWait));
962 }
963
964 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
965 return VINF_EM_HALT;
966}
967
968
969/**
970 * VMM ring-0 thread-context callback.
971 *
972 * This does common HM state updating and calls the HM-specific thread-context
973 * callback.
974 *
975 * This is used together with RTThreadCtxHookCreate() on platforms which
976 * support it, and directly from VMMR0EmtPrepareForBlocking() and
977 * VMMR0EmtResumeAfterBlocking() on platforms which don't.
978 *
979 * @param enmEvent The thread-context event.
980 * @param pvUser Opaque pointer to the VMCPU.
981 *
982 * @thread EMT(pvUser)
983 */
984static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
985{
986 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
987
988 switch (enmEvent)
989 {
990 case RTTHREADCTXEVENT_IN:
991 {
992 /*
993 * Linux may call us with preemption enabled (really!) but technically we
994 * cannot get preempted here, otherwise we end up in an infinite recursion
995 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
996 * ad infinitum). Let's just disable preemption for now...
997 */
998 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
999 * preemption after doing the callout (one or two functions up the
1000 * call chain). */
1001 /** @todo r=ramshankar: See @bugref{5313#c30}. */
1002 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1003 RTThreadPreemptDisable(&ParanoidPreemptState);
1004
1005 /* We need to update the VCPU <-> host CPU mapping. */
1006 RTCPUID idHostCpu;
1007 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1008 pVCpu->iHostCpuSet = iHostCpuSet;
1009 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1010
1011 /* In the very unlikely event that the GIP delta for the CPU we're
1012 rescheduled onto needs calculating, try to force a return to ring-3.
1013 We unfortunately cannot do the measurements right here. */
1014 if (RT_LIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1015 { /* likely */ }
1016 else
1017 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1018
1019 /* Invoke the HM-specific thread-context callback. */
1020 HMR0ThreadCtxCallback(enmEvent, pvUser);
1021
1022 /* Restore preemption. */
1023 RTThreadPreemptRestore(&ParanoidPreemptState);
1024 break;
1025 }
1026
1027 case RTTHREADCTXEVENT_OUT:
1028 {
1029 /* Invoke the HM-specific thread-context callback. */
1030 HMR0ThreadCtxCallback(enmEvent, pvUser);
1031
1032 /*
1033 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
1034 * have the same host CPU associated with it.
1035 */
1036 pVCpu->iHostCpuSet = UINT32_MAX;
1037 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1038 break;
1039 }
1040
1041 default:
1042 /* Invoke the HM-specific thread-context callback. */
1043 HMR0ThreadCtxCallback(enmEvent, pvUser);
1044 break;
1045 }
1046}
1047
1048
1049/**
1050 * Creates thread switching hook for the current EMT thread.
1051 *
1052 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
1053 * platform does not implement switcher hooks, no hooks will be created and the
1054 * member set to NIL_RTTHREADCTXHOOK.
1055 *
1056 * @returns VBox status code.
1057 * @param pVCpu The cross context virtual CPU structure.
1058 * @thread EMT(pVCpu)
1059 */
1060VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
1061{
1062 VMCPU_ASSERT_EMT(pVCpu);
1063 Assert(pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK);
1064
1065#if 1 /* To disable this stuff change to zero. */
1066 int rc = RTThreadCtxHookCreate(&pVCpu->vmmr0.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
1067 if (RT_SUCCESS(rc))
1068 {
1069 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = true;
1070 return rc;
1071 }
1072#else
1073 RT_NOREF(vmmR0ThreadCtxCallback);
1074 int rc = VERR_NOT_SUPPORTED;
1075#endif
1076
1077 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1078 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = false;
1079 if (rc == VERR_NOT_SUPPORTED)
1080 return VINF_SUCCESS;
1081
1082 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
1083 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
1084}
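/* Lifecycle summary (see the surrounding functions and VMMR0EntryFast): the hook is
   created here once per EMT, enabled with RTThreadCtxHookEnable just before entering
   HM context in VMMR0EntryFast, disabled again (via VMMR0ThreadCtxHookDisable or the
   direct RTThreadCtxHookDisable call) before returning to ring-3, and finally torn
   down by VMMR0ThreadCtxHookDestroyForEmt. */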
1085
1086
1087/**
1088 * Destroys the thread switching hook for the specified VCPU.
1089 *
1090 * @param pVCpu The cross context virtual CPU structure.
1091 * @remarks Can be called from any thread.
1092 */
1093VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
1094{
1095 int rc = RTThreadCtxHookDestroy(pVCpu->vmmr0.s.hCtxHook);
1096 AssertRC(rc);
1097 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1098}
1099
1100
1101/**
1102 * Disables the thread switching hook for this VCPU (if we got one).
1103 *
1104 * @param pVCpu The cross context virtual CPU structure.
1105 * @thread EMT(pVCpu)
1106 *
1107 * @remarks This also clears GVMCPU::idHostCpu, so the mapping is invalid after
1108 * this call. This means you have to be careful with what you do!
1109 */
1110VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1111{
1112 /*
1113 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1114 * @bugref{7726#c19} explains the need for this trick:
1115 *
1116 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1117 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
1118 * longjmp & normal return to ring-3, which opens a window where we may be
1119 * rescheduled without changing GVMCPU::idHostCpu and cause confusion if
1120 * the CPU starts executing a different EMT. Both functions first disable
1121 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1122 * an opening for getting preempted.
1123 */
1124 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1125 * all the time. */
1126
1127 /*
1128 * Disable the context hook, if we got one.
1129 */
1130 if (pVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1131 {
1132 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1133 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1134 int rc = RTThreadCtxHookDisable(pVCpu->vmmr0.s.hCtxHook);
1135 AssertRC(rc);
1136 }
1137}
1138
1139
1140/**
1141 * Internal version of VMMR0ThreadCtxHookIsEnabled.
1142 *
1143 * @returns true if registered, false otherwise.
1144 * @param pVCpu The cross context virtual CPU structure.
1145 */
1146DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1147{
1148 return RTThreadCtxHookIsEnabled(pVCpu->vmmr0.s.hCtxHook);
1149}
1150
1151
1152/**
1153 * Whether thread-context hooks are registered for this VCPU.
1154 *
1155 * @returns true if registered, false otherwise.
1156 * @param pVCpu The cross context virtual CPU structure.
1157 */
1158VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1159{
1160 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1161}
1162
1163
1164/**
1165 * Returns the ring-0 release logger instance.
1166 *
1167 * @returns Pointer to release logger, NULL if not configured.
1168 * @param pVCpu The cross context virtual CPU structure of the caller.
1169 * @thread EMT(pVCpu)
1170 */
1171VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu)
1172{
1173 PVMMR0LOGGER pLogger = pVCpu->vmm.s.pR0RelLoggerR0;
1174 if (pLogger)
1175 return &pLogger->Logger;
1176 return NULL;
1177}
1178
1179
1180#ifdef VBOX_WITH_STATISTICS
1181/**
1182 * Record return code statistics
1183 * @param pVM The cross context VM structure.
1184 * @param pVCpu The cross context virtual CPU structure.
1185 * @param rc The status code.
1186 */
1187static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1188{
1189 /*
1190 * Collect statistics.
1191 */
1192 switch (rc)
1193 {
1194 case VINF_SUCCESS:
1195 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1196 break;
1197 case VINF_EM_RAW_INTERRUPT:
1198 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1199 break;
1200 case VINF_EM_RAW_INTERRUPT_HYPER:
1201 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1202 break;
1203 case VINF_EM_RAW_GUEST_TRAP:
1204 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1205 break;
1206 case VINF_EM_RAW_RING_SWITCH:
1207 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1208 break;
1209 case VINF_EM_RAW_RING_SWITCH_INT:
1210 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1211 break;
1212 case VINF_EM_RAW_STALE_SELECTOR:
1213 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1214 break;
1215 case VINF_EM_RAW_IRET_TRAP:
1216 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1217 break;
1218 case VINF_IOM_R3_IOPORT_READ:
1219 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1220 break;
1221 case VINF_IOM_R3_IOPORT_WRITE:
1222 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1223 break;
1224 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1225 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1226 break;
1227 case VINF_IOM_R3_MMIO_READ:
1228 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1229 break;
1230 case VINF_IOM_R3_MMIO_WRITE:
1231 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1232 break;
1233 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1234 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1235 break;
1236 case VINF_IOM_R3_MMIO_READ_WRITE:
1237 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1238 break;
1239 case VINF_PATM_HC_MMIO_PATCH_READ:
1240 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1241 break;
1242 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1243 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1244 break;
1245 case VINF_CPUM_R3_MSR_READ:
1246 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1247 break;
1248 case VINF_CPUM_R3_MSR_WRITE:
1249 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1250 break;
1251 case VINF_EM_RAW_EMULATE_INSTR:
1252 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1253 break;
1254 case VINF_PATCH_EMULATE_INSTR:
1255 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1256 break;
1257 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1258 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1259 break;
1260 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1261 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1262 break;
1263 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1264 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1265 break;
1266 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1267 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1268 break;
1269 case VINF_CSAM_PENDING_ACTION:
1270 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1271 break;
1272 case VINF_PGM_SYNC_CR3:
1273 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1274 break;
1275 case VINF_PATM_PATCH_INT3:
1276 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1277 break;
1278 case VINF_PATM_PATCH_TRAP_PF:
1279 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1280 break;
1281 case VINF_PATM_PATCH_TRAP_GP:
1282 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1283 break;
1284 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1285 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1286 break;
1287 case VINF_EM_RESCHEDULE_REM:
1288 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1289 break;
1290 case VINF_EM_RAW_TO_R3:
1291 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1292 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1293 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1294 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1295 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1296 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1297 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1298 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1299 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1300 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1301 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1302 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1303 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1304 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1305 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1306 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1307 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1308 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1309 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1310 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1311 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1312 else
1313 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1314 break;
1315
1316 case VINF_EM_RAW_TIMER_PENDING:
1317 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1318 break;
1319 case VINF_EM_RAW_INTERRUPT_PENDING:
1320 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1321 break;
1322 case VINF_VMM_CALL_HOST:
1323 switch (pVCpu->vmm.s.enmCallRing3Operation)
1324 {
1325 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1326 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1327 break;
1328 case VMMCALLRING3_PDM_LOCK:
1329 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1330 break;
1331 case VMMCALLRING3_PGM_POOL_GROW:
1332 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1333 break;
1334 case VMMCALLRING3_PGM_LOCK:
1335 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1336 break;
1337 case VMMCALLRING3_PGM_MAP_CHUNK:
1338 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1339 break;
1340 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1341 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1342 break;
1343 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1344 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1345 break;
1346 case VMMCALLRING3_VM_SET_ERROR:
1347 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1348 break;
1349 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1350 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1351 break;
1352 case VMMCALLRING3_VM_R0_ASSERTION:
1353 default:
1354 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1355 break;
1356 }
1357 break;
1358 case VINF_PATM_DUPLICATE_FUNCTION:
1359 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1360 break;
1361 case VINF_PGM_CHANGE_MODE:
1362 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1363 break;
1364 case VINF_PGM_POOL_FLUSH_PENDING:
1365 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1366 break;
1367 case VINF_EM_PENDING_REQUEST:
1368 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1369 break;
1370 case VINF_EM_HM_PATCH_TPR_INSTR:
1371 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1372 break;
1373 default:
1374 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1375 break;
1376 }
1377}
1378#endif /* VBOX_WITH_STATISTICS */
1379
1380
1381/**
1382 * The Ring 0 entry point, called by the fast-ioctl path.
1383 *
1384 * @param pGVM The global (ring-0) VM structure.
1385 * @param pVMIgnored The cross context VM structure. The return code is
1386 * stored in pVM->vmm.s.iLastGZRc.
1387 * @param idCpu The Virtual CPU ID of the calling EMT.
1388 * @param enmOperation Which operation to execute.
1389 * @remarks Assume called with interrupts _enabled_.
1390 */
1391VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1392{
1393 RT_NOREF(pVMIgnored);
1394
1395 /*
1396 * Validation.
1397 */
1398 if ( idCpu < pGVM->cCpus
1399 && pGVM->cCpus == pGVM->cCpusUnsafe)
1400 { /*likely*/ }
1401 else
1402 {
1403 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1404 return;
1405 }
1406
1407 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1408 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1409 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1410 && pGVCpu->hNativeThreadR0 == hNativeThread))
1411 { /* likely */ }
1412 else
1413 {
1414 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1415 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1416 return;
1417 }
1418
1419 /*
1420 * SMAP fun.
1421 */
1422 VMM_CHECK_SMAP_SETUP();
1423 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1424
1425 /*
1426 * Perform requested operation.
1427 */
1428 switch (enmOperation)
1429 {
1430 /*
1431 * Run guest code using the available hardware acceleration technology.
1432 */
1433 case VMMR0_DO_HM_RUN:
1434 {
1435 for (;;) /* hlt loop */
1436 {
1437 /*
1438 * Disable preemption.
1439 */
1440 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1441 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1442 RTThreadPreemptDisable(&PreemptState);
1443 pGVCpu->vmmr0.s.pPreemptState = &PreemptState;
1444
1445 /*
1446 * Get the host CPU identifiers, make sure they are valid and that
1447 * we've got a TSC delta for the CPU.
1448 */
1449 RTCPUID idHostCpu;
1450 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1451 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1452 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1453 {
1454 pGVCpu->iHostCpuSet = iHostCpuSet;
1455 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1456
1457 /*
1458 * Update the periodic preemption timer if it's active.
1459 */
1460 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1461 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1462 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1463
1464#ifdef VMM_R0_TOUCH_FPU
1465 /*
1466 * Make sure we've got the FPU state loaded so we don't need to clear
1467 * CR0.TS and get out of sync with the host kernel when loading the guest
1468 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1469 */
1470 CPUMR0TouchHostFpu();
1471#endif
1472 int rc;
1473 bool fPreemptRestored = false;
1474 if (!HMR0SuspendPending())
1475 {
1476 /*
1477 * Enable the context switching hook.
1478 */
1479 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1480 {
1481 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmmr0.s.hCtxHook));
1482 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmmr0.s.hCtxHook); AssertRC(rc2);
1483 }
1484
1485 /*
1486 * Enter HM context.
1487 */
1488 rc = HMR0Enter(pGVCpu);
1489 if (RT_SUCCESS(rc))
1490 {
1491 pGVCpu->vmmr0.s.fInHmContext = true;
1492 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1493
1494 /*
1495 * When preemption hooks are in place, enable preemption now that
1496 * we're in HM context.
1497 */
1498 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1499 {
1500 fPreemptRestored = true;
1501 pGVCpu->vmmr0.s.pPreemptState = NULL;
1502 RTThreadPreemptRestore(&PreemptState);
1503 }
1504
1505 /*
1506 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1507 */
1508 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1509 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
1510 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1511
1512 /*
1513 * Assert sanity on the way out. Using manual assertions code here as normal
1514 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1515 */
1516 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1517 && RT_SUCCESS_NP(rc)
1518 && rc != VINF_VMM_CALL_HOST ))
1519 {
1520 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1521 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1522 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1523 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1524 }
1525#if 0
1526 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1527 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1528 {
1529 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1530 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1531 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1532 rc = VERR_VMM_CONTEXT_HOOK_STILL_ENABLED;
1533 }
1534#endif
1535
1536 pGVCpu->vmmr0.s.fInHmContext = false;
1537 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1538 }
1539 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1540
1541 /*
1542 * Invalidate the host CPU identifiers before we disable the context
1543 * hook / restore preemption.
1544 */
1545 pGVCpu->iHostCpuSet = UINT32_MAX;
1546 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1547
1548 /*
1549 * Disable context hooks. Due to unresolved cleanup issues, we
1550 * cannot leave the hooks enabled when we return to ring-3.
1551 *
1552 * Note! At the moment HM may also have disabled the hook
1553 * when we get here, but the IPRT API handles that.
1554 */
1555 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1556 RTThreadCtxHookDisable(pGVCpu->vmmr0.s.hCtxHook);
1557 }
1558 /*
1559 * The system is about to go into suspend mode; go back to ring 3.
1560 */
1561 else
1562 {
1563 pGVCpu->iHostCpuSet = UINT32_MAX;
1564 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1565 rc = VINF_EM_RAW_INTERRUPT;
1566 }
1567
1568 /** @todo When HM stops messing with the context hook state, we'll disable
1569 * preemption again before the RTThreadCtxHookDisable call. */
1570 if (!fPreemptRestored)
1571 {
1572 pGVCpu->vmmr0.s.pPreemptState = NULL;
1573 RTThreadPreemptRestore(&PreemptState);
1574 }
1575
1576 pGVCpu->vmm.s.iLastGZRc = rc;
1577
1578 /* Fire dtrace probe and collect statistics. */
1579 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1580#ifdef VBOX_WITH_STATISTICS
1581 vmmR0RecordRC(pGVM, pGVCpu, rc);
1582#endif
1583 /*
1584 * If this is a halt, try to handle it here rather than going back to ring-3.
1585 */
1586 if (rc != VINF_EM_HALT)
1587 { /* we're not in a hurry for a HLT, so prefer this path */ }
1588 else
1589 {
1590 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1591 if (rc == VINF_SUCCESS)
1592 {
1593 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1594 continue;
1595 }
1596 pGVCpu->vmm.s.cR0HaltsToRing3++;
1597 }
1598 }
1599 /*
1600 * Invalid CPU set index or TSC delta in need of measuring.
1601 */
1602 else
1603 {
1604 pGVCpu->vmmr0.s.pPreemptState = NULL;
1605 pGVCpu->iHostCpuSet = UINT32_MAX;
1606 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1607 RTThreadPreemptRestore(&PreemptState);
1608 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1609 {
1610 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1611 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1612 0 /*default cTries*/);
1613 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1614 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1615 else
1616 pGVCpu->vmm.s.iLastGZRc = rc;
1617 }
1618 else
1619 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1620 }
1621 break;
1622
1623 } /* halt loop. */
1624 break;
1625 }
1626
1627#ifdef VBOX_WITH_NEM_R0
1628# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1629 case VMMR0_DO_NEM_RUN:
1630 {
1631 /*
1632 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1633 */
1634 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1635# ifdef VBOXSTRICTRC_STRICT_ENABLED
1636 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1637# else
1638 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1639# endif
1640 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1641 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1642
1643 pGVCpu->vmm.s.iLastGZRc = rc;
1644
1645 /*
1646 * Fire dtrace probe and collect statistics.
1647 */
1648 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1649# ifdef VBOX_WITH_STATISTICS
1650 vmmR0RecordRC(pGVM, pGVCpu, rc);
1651# endif
1652 break;
1653 }
1654# endif
1655#endif
1656
1657 /*
1658 * For profiling.
1659 */
1660 case VMMR0_DO_NOP:
1661 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1662 break;
1663
1664 /*
1665 * Shouldn't happen.
1666 */
1667 default:
1668 AssertMsgFailed(("%#x\n", enmOperation));
1669 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1670 break;
1671 }
1672 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1673}
1674
1675
1676/**
1677 * Validates a session or VM session argument.
1678 *
1679 * @returns true / false accordingly.
1680 * @param pGVM The global (ring-0) VM structure.
1681 * @param pClaimedSession The session claim to validate.
1682 * @param pSession The session argument.
1683 */
1684DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1685{
1686 /* This must be set! */
1687 if (!pSession)
1688 return false;
1689
1690 /* Only one out of the two. */
1691 if (pGVM && pClaimedSession)
1692 return false;
1693 if (pGVM)
1694 pClaimedSession = pGVM->pSession;
1695 return pClaimedSession == pSession;
1696}
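/* Accepted combinations: pSession must always be given; with pGVM the claimed session
   must be NULL and pGVM->pSession must equal pSession; without pGVM the claimed
   session itself must equal pSession. */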
1697
1698
1699/**
1700 * VMMR0EntryEx worker function, either called directly or whenever possible
1701 * called thru a longjmp so we can exit safely on failure.
1702 *
1703 * @returns VBox status code.
1704 * @param pGVM The global (ring-0) VM structure.
1705 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1706 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1707 * @param enmOperation Which operation to execute.
1708 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1709 * The support driver validates this if it's present.
1710 * @param u64Arg Some simple constant argument.
1711 * @param pSession The session of the caller.
1712 *
1713 * @remarks Assume called with interrupts _enabled_.
1714 */
1715DECL_NO_INLINE(static, int) vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1716 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1717{
1718 /*
1719 * Validate pGVM and idCpu for consistency and validity.
1720 */
1721 if (pGVM != NULL)
1722 {
1723 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
1724 { /* likely */ }
1725 else
1726 {
1727 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1728 return VERR_INVALID_POINTER;
1729 }
1730
1731 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1732 { /* likely */ }
1733 else
1734 {
1735 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1736 return VERR_INVALID_PARAMETER;
1737 }
1738
1739 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1740 && pGVM->enmVMState <= VMSTATE_TERMINATED
1741 && pGVM->pSession == pSession
1742 && pGVM->pSelf == pGVM))
1743 { /* likely */ }
1744 else
1745 {
1746 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1747 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1748 return VERR_INVALID_POINTER;
1749 }
1750 }
1751 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1752 { /* likely */ }
1753 else
1754 {
1755 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1756 return VERR_INVALID_PARAMETER;
1757 }
1758
1759 /*
1760 * SMAP fun.
1761 */
1762 VMM_CHECK_SMAP_SETUP();
1763 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1764
1765 /*
1766 * Process the request.
1767 */
1768 int rc;
1769 switch (enmOperation)
1770 {
1771 /*
1772 * GVM requests
1773 */
1774 case VMMR0_DO_GVMM_CREATE_VM:
1775 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1776 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1777 else
1778 rc = VERR_INVALID_PARAMETER;
1779 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1780 break;
1781
1782 case VMMR0_DO_GVMM_DESTROY_VM:
1783 if (pReqHdr == NULL && u64Arg == 0)
1784 rc = GVMMR0DestroyVM(pGVM);
1785 else
1786 rc = VERR_INVALID_PARAMETER;
1787 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1788 break;
1789
1790 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1791 if (pGVM != NULL)
1792 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1793 else
1794 rc = VERR_INVALID_PARAMETER;
1795 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1796 break;
1797
1798 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1799 if (pGVM != NULL)
1800 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1801 else
1802 rc = VERR_INVALID_PARAMETER;
1803 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1804 break;
1805
1806 case VMMR0_DO_GVMM_SCHED_HALT:
1807 if (pReqHdr)
1808 return VERR_INVALID_PARAMETER;
1809 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1810 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1811 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1812 break;
1813
1814 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1815 if (pReqHdr || u64Arg)
1816 return VERR_INVALID_PARAMETER;
1817 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1818 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1819 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1820 break;
1821
1822 case VMMR0_DO_GVMM_SCHED_POKE:
1823 if (pReqHdr || u64Arg)
1824 return VERR_INVALID_PARAMETER;
1825 rc = GVMMR0SchedPoke(pGVM, idCpu);
1826 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1827 break;
1828
1829 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1830 if (u64Arg)
1831 return VERR_INVALID_PARAMETER;
1832 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1833 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1834 break;
1835
1836 case VMMR0_DO_GVMM_SCHED_POLL:
1837 if (pReqHdr || u64Arg > 1)
1838 return VERR_INVALID_PARAMETER;
1839 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1840 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1841 break;
1842
1843 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1844 if (u64Arg)
1845 return VERR_INVALID_PARAMETER;
1846 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1847 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1848 break;
1849
1850 case VMMR0_DO_GVMM_RESET_STATISTICS:
1851 if (u64Arg)
1852 return VERR_INVALID_PARAMETER;
1853 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1854 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1855 break;
1856
1857 /*
1858 * Initialize the R0 part of a VM instance.
1859 */
1860 case VMMR0_DO_VMMR0_INIT:
1861 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1862 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1863 break;
1864
1865 /*
1866 * Does EMT specific ring-0 init.
1867 */
1868 case VMMR0_DO_VMMR0_INIT_EMT:
1869 rc = vmmR0InitVMEmt(pGVM, idCpu);
1870 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1871 break;
1872
1873 /*
1874 * Terminate the R0 part of a VM instance.
1875 */
1876 case VMMR0_DO_VMMR0_TERM:
1877 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1878 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1879 break;
1880
1881 /*
1882 * Attempt to enable HM mode and check the current setting.
1883 */
1884 case VMMR0_DO_HM_ENABLE:
1885 rc = HMR0EnableAllCpus(pGVM);
1886 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1887 break;
1888
1889 /*
1890 * Set up the hardware-accelerated session.
1891 */
1892 case VMMR0_DO_HM_SETUP_VM:
1893 rc = HMR0SetupVM(pGVM);
1894 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1895 break;
1896
1897 /*
1898 * PGM wrappers.
1899 */
1900 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1901 if (idCpu == NIL_VMCPUID)
1902 return VERR_INVALID_CPU_ID;
1903 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
1904 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1905 break;
1906
1907 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1908 if (idCpu == NIL_VMCPUID)
1909 return VERR_INVALID_CPU_ID;
1910 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
1911 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1912 break;
1913
1914 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1915 if (idCpu == NIL_VMCPUID)
1916 return VERR_INVALID_CPU_ID;
1917 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, idCpu);
1918 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1919 break;
1920
1921 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1922 if (idCpu != 0)
1923 return VERR_INVALID_CPU_ID;
1924 rc = PGMR0PhysSetupIoMmu(pGVM);
1925 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1926 break;
1927
1928 case VMMR0_DO_PGM_POOL_GROW:
1929 if (idCpu == NIL_VMCPUID)
1930 return VERR_INVALID_CPU_ID;
1931 rc = PGMR0PoolGrow(pGVM);
1932 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1933 break;
1934
1935 /*
1936 * GMM wrappers.
1937 */
1938 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1939 if (u64Arg)
1940 return VERR_INVALID_PARAMETER;
1941 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1942 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1943 break;
1944
1945 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1946 if (u64Arg)
1947 return VERR_INVALID_PARAMETER;
1948 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1949 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1950 break;
1951
1952 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1953 if (u64Arg)
1954 return VERR_INVALID_PARAMETER;
1955 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1956 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1957 break;
1958
1959 case VMMR0_DO_GMM_FREE_PAGES:
1960 if (u64Arg)
1961 return VERR_INVALID_PARAMETER;
1962 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1963 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1964 break;
1965
1966 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1967 if (u64Arg)
1968 return VERR_INVALID_PARAMETER;
1969 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1970 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1971 break;
1972
1973 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1974 if (u64Arg)
1975 return VERR_INVALID_PARAMETER;
1976 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1977 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1978 break;
1979
1980 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1981 if (idCpu == NIL_VMCPUID)
1982 return VERR_INVALID_CPU_ID;
1983 if (u64Arg)
1984 return VERR_INVALID_PARAMETER;
1985 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1986 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1987 break;
1988
1989 case VMMR0_DO_GMM_BALLOONED_PAGES:
1990 if (u64Arg)
1991 return VERR_INVALID_PARAMETER;
1992 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1993 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1994 break;
1995
1996 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1997 if (u64Arg)
1998 return VERR_INVALID_PARAMETER;
1999 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
2000 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2001 break;
2002
2003 case VMMR0_DO_GMM_SEED_CHUNK:
2004 if (pReqHdr)
2005 return VERR_INVALID_PARAMETER;
2006 rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
2007 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2008 break;
2009
2010 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
2011 if (idCpu == NIL_VMCPUID)
2012 return VERR_INVALID_CPU_ID;
2013 if (u64Arg)
2014 return VERR_INVALID_PARAMETER;
2015 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
2016 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2017 break;
2018
2019 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
2020 if (idCpu == NIL_VMCPUID)
2021 return VERR_INVALID_CPU_ID;
2022 if (u64Arg)
2023 return VERR_INVALID_PARAMETER;
2024 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
2025 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2026 break;
2027
2028 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
2029 if (idCpu == NIL_VMCPUID)
2030 return VERR_INVALID_CPU_ID;
2031 if ( u64Arg
2032 || pReqHdr)
2033 return VERR_INVALID_PARAMETER;
2034 rc = GMMR0ResetSharedModules(pGVM, idCpu);
2035 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2036 break;
2037
2038#ifdef VBOX_WITH_PAGE_SHARING
2039 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
2040 {
2041 if (idCpu == NIL_VMCPUID)
2042 return VERR_INVALID_CPU_ID;
2043 if ( u64Arg
2044 || pReqHdr)
2045 return VERR_INVALID_PARAMETER;
2046 rc = GMMR0CheckSharedModules(pGVM, idCpu);
2047 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2048 break;
2049 }
2050#endif
2051
2052#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
2053 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
2054 if (u64Arg)
2055 return VERR_INVALID_PARAMETER;
2056 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
2057 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2058 break;
2059#endif
2060
2061 case VMMR0_DO_GMM_QUERY_STATISTICS:
2062 if (u64Arg)
2063 return VERR_INVALID_PARAMETER;
2064 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
2065 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2066 break;
2067
2068 case VMMR0_DO_GMM_RESET_STATISTICS:
2069 if (u64Arg)
2070 return VERR_INVALID_PARAMETER;
2071 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
2072 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2073 break;
2074
2075 /*
2076 * A quick GCFGM mock-up.
2077 */
2078 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
2079 case VMMR0_DO_GCFGM_SET_VALUE:
2080 case VMMR0_DO_GCFGM_QUERY_VALUE:
2081 {
2082 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2083 return VERR_INVALID_PARAMETER;
2084 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
2085 if (pReq->Hdr.cbReq != sizeof(*pReq))
2086 return VERR_INVALID_PARAMETER;
2087 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
2088 {
2089 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2090 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2091 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2092 }
2093 else
2094 {
2095 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2096 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2097 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2098 }
2099 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2100 break;
2101 }
2102
2103 /*
2104 * PDM Wrappers.
2105 */
2106 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2107 {
2108 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2109 return VERR_INVALID_PARAMETER;
2110 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2111 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2112 break;
2113 }
2114
2115 case VMMR0_DO_PDM_DEVICE_CREATE:
2116 {
2117 if (!pReqHdr || u64Arg || idCpu != 0)
2118 return VERR_INVALID_PARAMETER;
2119 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
2120 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2121 break;
2122 }
2123
2124 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2125 {
2126 if (!pReqHdr || u64Arg)
2127 return VERR_INVALID_PARAMETER;
2128 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2129 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2130 break;
2131 }
2132
2133 /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2134 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2135 {
2136 if (!pReqHdr || u64Arg || idCpu != 0)
2137 return VERR_INVALID_PARAMETER;
2138 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2139 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2140 break;
2141 }
2142
2143 /*
2144 * Requests to the internal networking service.
2145 */
2146 case VMMR0_DO_INTNET_OPEN:
2147 {
2148 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2149 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2150 return VERR_INVALID_PARAMETER;
2151 rc = IntNetR0OpenReq(pSession, pReq);
2152 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2153 break;
2154 }
2155
2156 case VMMR0_DO_INTNET_IF_CLOSE:
2157 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2158 return VERR_INVALID_PARAMETER;
2159 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2160 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2161 break;
2162
2163
2164 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2165 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2166 return VERR_INVALID_PARAMETER;
2167 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2168 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2169 break;
2170
2171 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2172 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2173 return VERR_INVALID_PARAMETER;
2174 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2175 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2176 break;
2177
2178 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2179 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2180 return VERR_INVALID_PARAMETER;
2181 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2182 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2183 break;
2184
2185 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2186 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2187 return VERR_INVALID_PARAMETER;
2188 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2189 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2190 break;
2191
2192 case VMMR0_DO_INTNET_IF_SEND:
2193 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2194 return VERR_INVALID_PARAMETER;
2195 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2196 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2197 break;
2198
2199 case VMMR0_DO_INTNET_IF_WAIT:
2200 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2201 return VERR_INVALID_PARAMETER;
2202 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2203 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2204 break;
2205
2206 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2207 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2208 return VERR_INVALID_PARAMETER;
2209 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2210 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2211 break;
2212
2213#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2214 /*
2215 * Requests to host PCI driver service.
2216 */
2217 case VMMR0_DO_PCIRAW_REQ:
2218 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2219 return VERR_INVALID_PARAMETER;
2220 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2221 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2222 break;
2223#endif
2224
2225 /*
2226 * NEM requests.
2227 */
2228#ifdef VBOX_WITH_NEM_R0
2229# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2230 case VMMR0_DO_NEM_INIT_VM:
2231 if (u64Arg || pReqHdr || idCpu != 0)
2232 return VERR_INVALID_PARAMETER;
2233 rc = NEMR0InitVM(pGVM);
2234 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2235 break;
2236
2237 case VMMR0_DO_NEM_INIT_VM_PART_2:
2238 if (u64Arg || pReqHdr || idCpu != 0)
2239 return VERR_INVALID_PARAMETER;
2240 rc = NEMR0InitVMPart2(pGVM);
2241 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2242 break;
2243
2244 case VMMR0_DO_NEM_MAP_PAGES:
2245 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2246 return VERR_INVALID_PARAMETER;
2247 rc = NEMR0MapPages(pGVM, idCpu);
2248 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2249 break;
2250
2251 case VMMR0_DO_NEM_UNMAP_PAGES:
2252 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2253 return VERR_INVALID_PARAMETER;
2254 rc = NEMR0UnmapPages(pGVM, idCpu);
2255 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2256 break;
2257
2258 case VMMR0_DO_NEM_EXPORT_STATE:
2259 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2260 return VERR_INVALID_PARAMETER;
2261 rc = NEMR0ExportState(pGVM, idCpu);
2262 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2263 break;
2264
2265 case VMMR0_DO_NEM_IMPORT_STATE:
2266 if (pReqHdr || idCpu == NIL_VMCPUID)
2267 return VERR_INVALID_PARAMETER;
2268 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2269 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2270 break;
2271
2272 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2273 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2274 return VERR_INVALID_PARAMETER;
2275 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2276 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2277 break;
2278
2279 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2280 if (pReqHdr || idCpu == NIL_VMCPUID)
2281 return VERR_INVALID_PARAMETER;
2282 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2283 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2284 break;
2285
2286 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2287 if (u64Arg || pReqHdr)
2288 return VERR_INVALID_PARAMETER;
2289 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2290 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2291 break;
2292
2293# if 1 && defined(DEBUG_bird)
2294 case VMMR0_DO_NEM_EXPERIMENT:
2295 if (pReqHdr)
2296 return VERR_INVALID_PARAMETER;
2297 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2298 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2299 break;
2300# endif
2301# endif
2302#endif
2303
2304 /*
2305 * IOM requests.
2306 */
2307 case VMMR0_DO_IOM_GROW_IO_PORTS:
2308 {
2309 if (pReqHdr || idCpu != 0)
2310 return VERR_INVALID_PARAMETER;
2311 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2312 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2313 break;
2314 }
2315
2316 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2317 {
2318 if (pReqHdr || idCpu != 0)
2319 return VERR_INVALID_PARAMETER;
2320 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2321 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2322 break;
2323 }
2324
2325 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2326 {
2327 if (pReqHdr || idCpu != 0)
2328 return VERR_INVALID_PARAMETER;
2329 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2330 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2331 break;
2332 }
2333
2334 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2335 {
2336 if (pReqHdr || idCpu != 0)
2337 return VERR_INVALID_PARAMETER;
2338 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2339 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2340 break;
2341 }
2342
2343 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2344 {
2345 if (pReqHdr || idCpu != 0)
2346 return VERR_INVALID_PARAMETER;
2347 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2348 if (RT_SUCCESS(rc))
2349 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2350 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2351 break;
2352 }
2353
2354 /*
2355 * DBGF requests.
2356 */
2357#ifdef VBOX_WITH_DBGF_TRACING
2358 case VMMR0_DO_DBGF_TRACER_CREATE:
2359 {
2360 if (!pReqHdr || u64Arg || idCpu != 0)
2361 return VERR_INVALID_PARAMETER;
2362 rc = DBGFR0TracerCreateReqHandler(pGVM, (PDBGFTRACERCREATEREQ)pReqHdr);
2363 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2364 break;
2365 }
2366
2367 case VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER:
2368 {
2369 if (!pReqHdr || u64Arg)
2370 return VERR_INVALID_PARAMETER;
2371# if 0 /** @todo */
2372 rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
2373# else
2374 rc = VERR_NOT_IMPLEMENTED;
2375# endif
2376 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2377 break;
2378 }
2379#endif
2380
2381 case VMMR0_DO_DBGF_BP_INIT:
2382 {
2383 if (!pReqHdr || u64Arg || idCpu != 0)
2384 return VERR_INVALID_PARAMETER;
2385 rc = DBGFR0BpInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2386 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2387 break;
2388 }
2389
2390 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2391 {
2392 if (!pReqHdr || u64Arg || idCpu != 0)
2393 return VERR_INVALID_PARAMETER;
2394 rc = DBGFR0BpChunkAllocReqHandler(pGVM, (PDBGFBPCHUNKALLOCREQ)pReqHdr);
2395 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2396 break;
2397 }
2398
2399 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2400 {
2401 if (!pReqHdr || u64Arg || idCpu != 0)
2402 return VERR_INVALID_PARAMETER;
2403 rc = DBGFR0BpL2TblChunkAllocReqHandler(pGVM, (PDBGFBPL2TBLCHUNKALLOCREQ)pReqHdr);
2404 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2405 break;
2406 }
2407
2408 case VMMR0_DO_DBGF_BP_OWNER_INIT:
2409 {
2410 if (!pReqHdr || u64Arg || idCpu != 0)
2411 return VERR_INVALID_PARAMETER;
2412 rc = DBGFR0BpOwnerInitReqHandler(pGVM, (PDBGFBPOWNERINITREQ)pReqHdr);
2413 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2414 break;
2415 }
2416
2417 case VMMR0_DO_DBGF_BP_PORTIO_INIT:
2418 {
2419 if (!pReqHdr || u64Arg || idCpu != 0)
2420 return VERR_INVALID_PARAMETER;
2421 rc = DBGFR0BpPortIoInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2422 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2423 break;
2424 }
2425
2426
2427 /*
2428 * TM requests.
2429 */
2430 case VMMR0_DO_TM_GROW_TIMER_QUEUE:
2431 {
2432 if (pReqHdr || idCpu == NIL_VMCPUID)
2433 return VERR_INVALID_PARAMETER;
2434 rc = TMR0TimerQueueGrow(pGVM, RT_HI_U32(u64Arg), RT_LO_U32(u64Arg));
2435 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2436 break;
2437 }
2438
2439 /*
2440 * For profiling.
2441 */
2442 case VMMR0_DO_NOP:
2443 case VMMR0_DO_SLOW_NOP:
2444 return VINF_SUCCESS;
2445
2446 /*
2447 * For testing Ring-0 APIs invoked in this environment.
2448 */
2449 case VMMR0_DO_TESTS:
2450 /** @todo make new test */
2451 return VINF_SUCCESS;
2452
2453 default:
2454 /*
2455 * We're returning VERR_NOT_SUPPORTED here so we've got something other
2456 * than -1, which the interrupt gate glue code might return.
2457 */
2458 Log(("operation %#x is not supported\n", enmOperation));
2459 return VERR_NOT_SUPPORTED;
2460 }
2461 return rc;
2462}
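
/*
 * Reader's note on the switch above: each operation first validates the
 * arguments it does not use (pReqHdr, u64Arg, idCpu) and bails out with
 * VERR_INVALID_PARAMETER or VERR_INVALID_CPU_ID when they do not match,
 * then dispatches to the subsystem handler and runs a SMAP sanity check
 * (VMM_CHECK_SMAP_CHECK2) before breaking out with the handler status.
 */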
2463
2464
2465/**
2466 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2467 *
2468 * @returns VBox status code.
2469 * @param pvArgs The argument package (really a PGVMCPU pointer).
2470 */
2471static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2472{
2473 PGVMCPU pGVCpu = (PGVMCPU)pvArgs;
2474 return vmmR0EntryExWorker(pGVCpu->vmmr0.s.pGVM,
2475 pGVCpu->vmmr0.s.idCpu,
2476 pGVCpu->vmmr0.s.enmOperation,
2477 pGVCpu->vmmr0.s.pReq,
2478 pGVCpu->vmmr0.s.u64Arg,
2479 pGVCpu->vmmr0.s.pSession);
2480}
2481
2482
2483/**
2484 * The Ring 0 entry point, called by the support library (SUP).
2485 *
2486 * @returns VBox status code.
2487 * @param pGVM The global (ring-0) VM structure.
2488 * @param pVM The cross context VM structure.
2489 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2490 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2491 * @param enmOperation Which operation to execute.
2492 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2493 * @param u64Arg Some simple constant argument.
2494 * @param pSession The session of the caller.
2495 * @remarks Assume called with interrupts _enabled_.
2496 */
2497VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2498 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2499{
2500 /*
2501 * Requests that should only happen on the EMT thread will be
2502 * wrapped in a setjmp so we can assert without causing trouble.
2503 */
2504 if ( pVM != NULL
2505 && pGVM != NULL
2506 && pVM == pGVM /** @todo drop pVM or pGVM */
2507 && idCpu < pGVM->cCpus
2508 && pGVM->pSession == pSession
2509 && pGVM->pSelf == pVM)
2510 {
2511 switch (enmOperation)
2512 {
2513 /* These might/will be called before VMMR3Init. */
2514 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2515 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2516 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2517 case VMMR0_DO_GMM_FREE_PAGES:
2518 case VMMR0_DO_GMM_BALLOONED_PAGES:
2519 /* On macOS we might not have a valid jmp buf, so check these as well. */
2520 case VMMR0_DO_VMMR0_INIT:
2521 case VMMR0_DO_VMMR0_TERM:
2522
2523 case VMMR0_DO_PDM_DEVICE_CREATE:
2524 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2525 case VMMR0_DO_IOM_GROW_IO_PORTS:
2526 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2527 case VMMR0_DO_DBGF_BP_INIT:
2528 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2529 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2530 {
2531 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2532 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2533 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2534 && pGVCpu->hNativeThreadR0 == hNativeThread))
2535 {
2536 if (!pGVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2537 break;
2538
2539 pGVCpu->vmmr0.s.pGVM = pGVM;
2540 pGVCpu->vmmr0.s.idCpu = idCpu;
2541 pGVCpu->vmmr0.s.enmOperation = enmOperation;
2542 pGVCpu->vmmr0.s.pReq = pReq;
2543 pGVCpu->vmmr0.s.u64Arg = u64Arg;
2544 pGVCpu->vmmr0.s.pSession = pSession;
2545 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, pGVCpu,
2546 ((uintptr_t)u64Arg << 16) | (uintptr_t)enmOperation);
2547 }
2548 return VERR_VM_THREAD_NOT_EMT;
2549 }
2550
2551 default:
2552 case VMMR0_DO_PGM_POOL_GROW:
2553 break;
2554 }
2555 }
2556 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2557}
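
/*
 * Call path overview (a reader's sketch, not additional code): EMT requests
 * that can use the jump buffer flow through
 *      VMMR0EntryEx -> vmmR0CallRing3SetJmpEx -> vmmR0EntryExWrapper -> vmmR0EntryExWorker,
 * while everything else goes straight to vmmR0EntryExWorker.  Ring-3 is
 * assumed to reach VMMR0EntryEx through the support driver's ioctl interface
 * (e.g. SUPR3CallVMMR0Ex); that side is not defined in this file.
 */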
2558
2559
2560/**
2561 * Checks whether we've armed the ring-0 long jump machinery.
2562 *
2563 * @returns @c true / @c false
2564 * @param pVCpu The cross context virtual CPU structure.
2565 * @thread EMT
2566 * @sa VMMIsLongJumpArmed
2567 */
2568VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2569{
2570#ifdef RT_ARCH_X86
2571 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2572 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2573#else
2574 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2575 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2576#endif
2577}
2578
2579
2580/**
2581 * Checks whether we've done a ring-3 long jump.
2582 *
2583 * @returns @c true / @c false
2584 * @param pVCpu The cross context virtual CPU structure.
2585 * @thread EMT
2586 */
2587VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2588{
2589 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2590}
2591
2592
2593/**
2594 * Locking helper that deals with HM context and checks if the thread can block.
2595 *
2596 * @returns VINF_SUCCESS if we can block. Returns @a rcBusy or
2597 * VERR_VMM_CANNOT_BLOCK if not able to block.
2598 * @param pVCpu The cross context virtual CPU structure of the calling
2599 * thread.
2600 * @param rcBusy What to return in case of a blocking problem. Will IPE
2601 * if VINF_SUCCESS and we cannot block.
2602 * @param pszCaller The caller (for logging problems).
2603 * @param pvLock The lock address (for logging problems).
2604 * @param pCtx Where to return context info for the resume call.
2605 * @thread EMT(pVCpu)
2606 */
2607VMMR0_INT_DECL(int) VMMR0EmtPrepareToBlock(PVMCPUCC pVCpu, int rcBusy, const char *pszCaller, void *pvLock,
2608 PVMMR0EMTBLOCKCTX pCtx)
2609{
2610 const char *pszMsg;
2611
2612 /*
2613 * Check that we are allowed to block.
2614 */
2615 if (RT_LIKELY(VMMRZCallRing3IsEnabled(pVCpu)))
2616 {
2617 /*
2618 * Are we in HM context and w/o a context hook? If so, do the context hook work ourselves.
2619 */
2620 if (pVCpu->idHostCpu != NIL_RTCPUID)
2621 {
2622 Assert(pVCpu->iHostCpuSet != UINT32_MAX);
2623 Assert(pVCpu->vmmr0.s.fInHmContext);
2624
2625 if (pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK)
2626 {
2627 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_OUT, pVCpu);
2628 if (pVCpu->vmmr0.s.pPreemptState)
2629 RTThreadPreemptRestore(pVCpu->vmmr0.s.pPreemptState);
2630
2631 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2632 pCtx->fWasInHmContext = true;
2633 return VINF_SUCCESS;
2634 }
2635 }
2636
2637 if (RT_LIKELY(!pVCpu->vmmr0.s.pPreemptState))
2638 {
2639 /*
2640 * Not in HM context or we've got hooks, so just check that preemption
2641 * is enabled.
2642 */
2643 if (RT_LIKELY(RTThreadPreemptIsEnabled(NIL_RTTHREAD)))
2644 {
2645 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2646 pCtx->fWasInHmContext = false;
2647 return VINF_SUCCESS;
2648 }
2649 pszMsg = "Preemption is disabled!";
2650 }
2651 else
2652 pszMsg = "Preemption state w/o HM state!";
2653 }
2654 else
2655 pszMsg = "Ring-3 calls are disabled!";
2656
2657 static uint32_t volatile s_cWarnings = 0;
2658 if (++s_cWarnings < 50)
2659 SUPR0Printf("VMMR0EmtPrepareToBlock: %s pvLock=%p pszCaller=%s rcBusy=%p\n", pszMsg, pvLock, pszCaller, rcBusy);
2660 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2661 pCtx->fWasInHmContext = false;
2662 return rcBusy != VINF_SUCCESS ? rcBusy : VERR_VMM_CANNOT_BLOCK;
2663}
2664
2665
2666/**
2667 * Counterpart to VMMR0EmtPrepareToBlock.
2668 *
2669 * @param pVCpu The cross context virtual CPU structure of the calling
2670 * thread.
2671 * @param pCtx The context structure used with VMMR0EmtPrepareToBlock.
2672 * @thread EMT(pVCpu)
2673 */
2674VMMR0_INT_DECL(void) VMMR0EmtResumeAfterBlocking(PVMCPUCC pVCpu, PVMMR0EMTBLOCKCTX pCtx)
2675{
2676 AssertReturnVoid(pCtx->uMagic == VMMR0EMTBLOCKCTX_MAGIC);
2677 if (pCtx->fWasInHmContext)
2678 {
2679 if (pVCpu->vmmr0.s.pPreemptState)
2680 RTThreadPreemptDisable(pVCpu->vmmr0.s.pPreemptState);
2681
2682 pCtx->fWasInHmContext = false;
2683 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_IN, pVCpu);
2684 }
2685 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2686}
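
/*
 * Usage sketch for the two helpers above (illustrative only; pEventLock,
 * hEvent and cMsTimeout are hypothetical names, not part of this file):
 *
 *      VMMR0EMTBLOCKCTX Ctx;
 *      int rc = VMMR0EmtPrepareToBlock(pVCpu, VERR_SEM_BUSY, __FUNCTION__, pEventLock, &Ctx);
 *      if (rc == VINF_SUCCESS)
 *      {
 *          rc = RTSemEventWait(hEvent, cMsTimeout);   // any blocking wait goes here
 *          VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
 *      }
 *      // else: rc is the rcBusy passed in (VERR_SEM_BUSY here) or VERR_VMM_CANNOT_BLOCK.
 */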
2687
2688
2689/**
2690 * Internal R0 logger worker: Flush logger.
2691 *
2692 * @param pLogger The logger instance to flush.
2693 * @remark This function must be exported!
2694 */
2695VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2696{
2697#ifdef LOG_ENABLED
2698 /*
2699 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2700 * (This code is a bit paranoid.)
2701 */
2702 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_UOFFSETOF(VMMR0LOGGER, Logger));
2703 if ( !VALID_PTR(pR0Logger)
2704 || !VALID_PTR(pR0Logger + 1)
2705 || pLogger->u32Magic != RTLOGGER_MAGIC)
2706 {
2707# ifdef DEBUG
2708 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2709# endif
2710 return;
2711 }
2712 if (pR0Logger->fFlushingDisabled)
2713 return; /* quietly */
2714
2715 PVMCC pVM = pR0Logger->pVM;
2716 if ( !VALID_PTR(pVM)
2717 || pVM->pSelf != pVM)
2718 {
2719# ifdef DEBUG
2720 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pSelf=%p! pLogger=%p\n", pVM, pVM->pSelf, pLogger);
2721# endif
2722 return;
2723 }
2724
2725 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2726 if (pVCpu)
2727 {
2728 /*
2729 * Check that the jump buffer is armed.
2730 */
2731# ifdef RT_ARCH_X86
2732 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2733 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2734# else
2735 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2736 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2737# endif
2738 {
2739# ifdef DEBUG
2740 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2741# endif
2742 return;
2743 }
2744 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2745 }
2746# ifdef DEBUG
2747 else
2748 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2749# endif
2750#else
2751 NOREF(pLogger);
2752#endif /* LOG_ENABLED */
2753}
2754
2755#ifdef LOG_ENABLED
2756
2757/**
2758 * Disables flushing of the ring-0 debug log.
2759 *
2760 * @param pVCpu The cross context virtual CPU structure.
2761 */
2762VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPUCC pVCpu)
2763{
2764 if (pVCpu->vmm.s.pR0LoggerR0)
2765 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2766 if (pVCpu->vmm.s.pR0RelLoggerR0)
2767 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = true;
2768}
2769
2770
2771/**
2772 * Enables flushing of the ring-0 debug log.
2773 *
2774 * @param pVCpu The cross context virtual CPU structure.
2775 */
2776VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPUCC pVCpu)
2777{
2778 if (pVCpu->vmm.s.pR0LoggerR0)
2779 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2780 if (pVCpu->vmm.s.pR0RelLoggerR0)
2781 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = false;
2782}
2783
2784
2785/**
2786 * Checks if log flushing is disabled or not.
2787 *
2788 * @param pVCpu The cross context virtual CPU structure.
2789 */
2790VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPUCC pVCpu)
2791{
2792 if (pVCpu->vmm.s.pR0LoggerR0)
2793 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2794 if (pVCpu->vmm.s.pR0RelLoggerR0)
2795 return pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled;
2796 return true;
2797}
2798
2799#endif /* LOG_ENABLED */
2800
2801/*
2802 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
2803 */
2804DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
2805{
2806 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
2807 if (pGVCpu)
2808 {
2809 PVMCPUCC pVCpu = pGVCpu;
2810 if (RT_VALID_PTR(pVCpu))
2811 {
2812 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR0;
2813 if (RT_VALID_PTR(pVmmLogger))
2814 {
2815 if ( pVmmLogger->fCreated
2816 && pVmmLogger->pVM == pGVCpu->pGVM)
2817 {
2818 if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
2819 return NULL;
2820 uint16_t const fFlags = RT_LO_U16(fFlagsAndGroup);
2821 uint16_t const iGroup = RT_HI_U16(fFlagsAndGroup);
2822 if ( iGroup != UINT16_MAX
2823 && ( ( pVmmLogger->Logger.afGroups[iGroup < pVmmLogger->Logger.cGroups ? iGroup : 0]
2824 & (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED))
2825 != (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED)))
2826 return NULL;
2827 return &pVmmLogger->Logger;
2828 }
2829 }
2830 }
2831 }
2832 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
2833}
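
/*
 * Note on fFlagsAndGroup (derived from the RT_LO_U16/RT_HI_U16 use above): the
 * low 16 bits carry the requested RTLOGGRPFLAGS_XXX flags and the high 16 bits
 * the log group index.  A caller would typically pack it along the lines of
 * RT_MAKE_U32(RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP_VMM); the actual call sites
 * live in the IPRT logging macros, not in this file.
 */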
2834
2835
2836/*
2837 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2838 *
2839 * @returns true if the breakpoint should be hit, false if it should be ignored.
2840 */
2841DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2842{
2843#if 0
2844 return true;
2845#else
2846 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2847 if (pVM)
2848 {
2849 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2850
2851 if (pVCpu)
2852 {
2853# ifdef RT_ARCH_X86
2854 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2855 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2856# else
2857 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2858 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2859# endif
2860 {
2861 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2862 return RT_FAILURE_NP(rc);
2863 }
2864 }
2865 }
2866# ifdef RT_OS_LINUX
2867 return true;
2868# else
2869 return false;
2870# endif
2871#endif
2872}
2873
2874
2875/*
2876 * Override this so we can push it up to ring-3.
2877 */
2878DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2879{
2880 /*
2881 * To the log.
2882 */
2883 LogAlways(("\n!!R0-Assertion Failed!!\n"
2884 "Expression: %s\n"
2885 "Location : %s(%d) %s\n",
2886 pszExpr, pszFile, uLine, pszFunction));
2887
2888 /*
2889 * To the global VMM buffer.
2890 */
2891 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2892 if (pVM)
2893 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2894 "\n!!R0-Assertion Failed!!\n"
2895 "Expression: %.*s\n"
2896 "Location : %s(%d) %s\n",
2897 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2898 pszFile, uLine, pszFunction);
2899
2900 /*
2901 * Continue the normal way.
2902 */
2903 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2904}
2905
2906
2907/**
2908 * Callback for RTLogFormatV which writes to the ring-3 log port.
2909 * See PFNLOGOUTPUT() for details.
2910 */
2911static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2912{
2913 for (size_t i = 0; i < cbChars; i++)
2914 {
2915 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2916 }
2917
2918 NOREF(pv);
2919 return cbChars;
2920}
2921
2922
2923/*
2924 * Override this so we can push it up to ring-3.
2925 */
2926DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2927{
2928 va_list vaCopy;
2929
2930 /*
2931 * Push the message to the loggers.
2932 */
2933 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2934 if (pLog)
2935 {
2936 va_copy(vaCopy, va);
2937 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2938 va_end(vaCopy);
2939 }
2940 pLog = RTLogRelGetDefaultInstance();
2941 if (pLog)
2942 {
2943 va_copy(vaCopy, va);
2944 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2945 va_end(vaCopy);
2946 }
2947
2948 /*
2949 * Push it to the global VMM buffer.
2950 */
2951 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2952 if (pVM)
2953 {
2954 va_copy(vaCopy, va);
2955 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2956 va_end(vaCopy);
2957 }
2958
2959 /*
2960 * Continue the normal way.
2961 */
2962 RTAssertMsg2V(pszFormat, va);
2963}
2964