VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 90379

Last change on this file since 90379 was 90379, checked in by vboxsync, 4 years ago

VMM: Implementing blocking on critical sections in ring-0 HM context (actual code is disabled). bugref:6695

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 109.2 KB
1/* $Id: VMMR0.cpp 90379 2021-07-28 20:00:43Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_NEM_R0
31# include <VBox/vmm/nem.h>
32#endif
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/stam.h>
35#include <VBox/vmm/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvm.h>
39#ifdef VBOX_WITH_PCI_PASSTHROUGH
40# include <VBox/vmm/pdmpci.h>
41#endif
42#include <VBox/vmm/apic.h>
43
44#include <VBox/vmm/gvmm.h>
45#include <VBox/vmm/gmm.h>
46#include <VBox/vmm/gim.h>
47#include <VBox/intnet.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/param.h>
50#include <VBox/err.h>
51#include <VBox/version.h>
52#include <VBox/log.h>
53
54#include <iprt/asm-amd64-x86.h>
55#include <iprt/assert.h>
56#include <iprt/crc.h>
57#include <iprt/mp.h>
58#include <iprt/once.h>
59#include <iprt/stdarg.h>
60#include <iprt/string.h>
61#include <iprt/thread.h>
62#include <iprt/timer.h>
63#include <iprt/time.h>
64
65#include "dtrace/VBoxVMM.h"
66
67
68#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
69# pragma intrinsic(_AddressOfReturnAddress)
70#endif
71
72#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
73# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
74#endif
75
76
77
78/*********************************************************************************************************************************
79* Defined Constants And Macros *
80*********************************************************************************************************************************/
81/** @def VMM_CHECK_SMAP_SETUP
82 * SMAP check setup. */
83/** @def VMM_CHECK_SMAP_CHECK
84 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
85 * it will be logged and @a a_BadExpr is executed. */
86/** @def VMM_CHECK_SMAP_CHECK2
87 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
88 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
89 * executed. */
90#if (defined(VBOX_STRICT) || 1) && !defined(VBOX_WITH_RAM_IN_KERNEL)
91# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
92# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
93 do { \
94 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
95 { \
96 RTCCUINTREG fEflCheck = ASMGetFlags(); \
97 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
98 { /* likely */ } \
99 else \
100 { \
101 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
102 a_BadExpr; \
103 } \
104 } \
105 } while (0)
106# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
107 do { \
108 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
109 { \
110 RTCCUINTREG fEflCheck = ASMGetFlags(); \
111 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
112 { /* likely */ } \
113 else if (a_pGVM) \
114 { \
115 SUPR0BadContext((a_pGVM)->pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
116 RTStrPrintf((a_pGVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pGVM)->vmm.s.szRing0AssertMsg1), \
117 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
118 a_BadExpr; \
119 } \
120 else \
121 { \
122 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
123 a_BadExpr; \
124 } \
125 } \
126 } while (0)
127#else
128# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
129# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
130# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures)
131#endif
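
As a quick orientation, here is a minimal, hypothetical usage sketch of the three macros above (the function name is illustrative only; the real call sites are ModuleInit, vmmR0InitVM and VMMR0EntryFast further down in this file):

static int vmmR0HypotheticalWorker(PGVM pGVM)
{
    VMM_CHECK_SMAP_SETUP();                                          /* captures SUPR0GetKernelFeatures() once */
    VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);         /* logs and bails if SMAP is enabled but EFLAGS.AC is clear */
    /* ... do the actual ring-0 work ... */
    VMM_CHECK_SMAP_CHECK2(pGVM, return VERR_VMM_SMAP_BUT_AC_CLEAR);  /* same check, but also records the VM assertion text */
    return VINF_SUCCESS;
}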
132
133
134/*********************************************************************************************************************************
135* Internal Functions *
136*********************************************************************************************************************************/
137RT_C_DECLS_BEGIN
138#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
139extern uint64_t __udivdi3(uint64_t, uint64_t);
140extern uint64_t __umoddi3(uint64_t, uint64_t);
141#endif
142RT_C_DECLS_END
143
144
145/*********************************************************************************************************************************
146* Global Variables *
147*********************************************************************************************************************************/
148/** Drag in necessary library bits.
149 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
150struct CLANG11WEIRDNOTHROW { PFNRT pfn; } g_VMMR0Deps[] =
151{
152 { (PFNRT)RTCrc32 },
153 { (PFNRT)RTOnce },
154#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
155 { (PFNRT)__udivdi3 },
156 { (PFNRT)__umoddi3 },
157#endif
158 { NULL }
159};
160
161#ifdef RT_OS_SOLARIS
162/* Dependency information for the native solaris loader. */
163extern "C" { char _depends_on[] = "vboxdrv"; }
164#endif
165
166
167/**
168 * Initialize the module.
169 * This is called when we're first loaded.
170 *
171 * @returns 0 on success.
172 * @returns VBox status on failure.
173 * @param hMod Image handle for use in APIs.
174 */
175DECLEXPORT(int) ModuleInit(void *hMod)
176{
177 VMM_CHECK_SMAP_SETUP();
178 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
179
180#ifdef VBOX_WITH_DTRACE_R0
181 /*
182 * The first thing to do is register the static tracepoints.
183 * (Deregistration is automatic.)
184 */
185 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
186 if (RT_FAILURE(rc2))
187 return rc2;
188#endif
189 LogFlow(("ModuleInit:\n"));
190
191#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
192 /*
193 * Display the CMOS debug code.
194 */
195 ASMOutU8(0x72, 0x03);
196 uint8_t bDebugCode = ASMInU8(0x73);
197 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
198 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
199#endif
200
201 /*
202 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
203 */
204 int rc = vmmInitFormatTypes();
205 if (RT_SUCCESS(rc))
206 {
207 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
208 rc = GVMMR0Init();
209 if (RT_SUCCESS(rc))
210 {
211 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
212 rc = GMMR0Init();
213 if (RT_SUCCESS(rc))
214 {
215 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
216 rc = HMR0Init();
217 if (RT_SUCCESS(rc))
218 {
219 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
220
221 PDMR0Init(hMod);
222 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
223
224 rc = PGMRegisterStringFormatTypes();
225 if (RT_SUCCESS(rc))
226 {
227 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
228#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
229 rc = PGMR0DynMapInit();
230#endif
231 if (RT_SUCCESS(rc))
232 {
233 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
234 rc = IntNetR0Init();
235 if (RT_SUCCESS(rc))
236 {
237#ifdef VBOX_WITH_PCI_PASSTHROUGH
238 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
239 rc = PciRawR0Init();
240#endif
241 if (RT_SUCCESS(rc))
242 {
243 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
244 rc = CPUMR0ModuleInit();
245 if (RT_SUCCESS(rc))
246 {
247#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
248 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
249 rc = vmmR0TripleFaultHackInit();
250 if (RT_SUCCESS(rc))
251#endif
252 {
253 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
254 if (RT_SUCCESS(rc))
255 {
256 LogFlow(("ModuleInit: returns success\n"));
257 return VINF_SUCCESS;
258 }
259 }
260
261 /*
262 * Bail out.
263 */
264#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
265 vmmR0TripleFaultHackTerm();
266#endif
267 }
268 else
269 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
270#ifdef VBOX_WITH_PCI_PASSTHROUGH
271 PciRawR0Term();
272#endif
273 }
274 else
275 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
276 IntNetR0Term();
277 }
278 else
279 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
280#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
281 PGMR0DynMapTerm();
282#endif
283 }
284 else
285 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
286 PGMDeregisterStringFormatTypes();
287 }
288 else
289 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
290 HMR0Term();
291 }
292 else
293 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
294 GMMR0Term();
295 }
296 else
297 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
298 GVMMR0Term();
299 }
300 else
301 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
302 vmmTermFormatTypes();
303 }
304 else
305 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
306
307 LogFlow(("ModuleInit: failed %Rrc\n", rc));
308 return rc;
309}
310
311
312/**
313 * Terminate the module.
314 * This is called when we're finally unloaded.
315 *
316 * @param hMod Image handle for use in APIs.
317 */
318DECLEXPORT(void) ModuleTerm(void *hMod)
319{
320 NOREF(hMod);
321 LogFlow(("ModuleTerm:\n"));
322
323 /*
324 * Terminate the CPUM module (Local APIC cleanup).
325 */
326 CPUMR0ModuleTerm();
327
328 /*
329 * Terminate the internal network service.
330 */
331 IntNetR0Term();
332
333 /*
334 * PGM (Darwin), HM and PciRaw global cleanup.
335 */
336#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
337 PGMR0DynMapTerm();
338#endif
339#ifdef VBOX_WITH_PCI_PASSTHROUGH
340 PciRawR0Term();
341#endif
342 PGMDeregisterStringFormatTypes();
343 HMR0Term();
344#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
345 vmmR0TripleFaultHackTerm();
346#endif
347
348 /*
349 * Destroy the GMM and GVMM instances.
350 */
351 GMMR0Term();
352 GVMMR0Term();
353
354 vmmTermFormatTypes();
355
356 LogFlow(("ModuleTerm: returns\n"));
357}
358
359
360/**
361 * Initializes VMM specific members when the GVM structure is created.
362 *
363 * @param pGVM The global (ring-0) VM structure.
364 */
365VMMR0_INT_DECL(void) VMMR0InitPerVMData(PGVM pGVM)
366{
367 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
368 {
369 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
370 pGVCpu->vmmr0.s.idHostCpu = NIL_RTCPUID;
371 pGVCpu->vmmr0.s.iHostCpuSet = UINT32_MAX;
372 pGVCpu->vmmr0.s.fInHmContext = false;
373 pGVCpu->vmmr0.s.pPreemptState = NULL;
374 pGVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
375 }
376}
377
378
379/**
380 * Initiates the R0 driver for a particular VM instance.
381 *
382 * @returns VBox status code.
383 *
384 * @param pGVM The global (ring-0) VM structure.
385 * @param uSvnRev The SVN revision of the ring-3 part.
386 * @param uBuildType Build type indicator.
387 * @thread EMT(0)
388 */
389static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
390{
391 VMM_CHECK_SMAP_SETUP();
392 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
393
394 /*
395 * Match the SVN revisions and build type.
396 */
397 if (uSvnRev != VMMGetSvnRev())
398 {
399 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
400 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
401 return VERR_VMM_R0_VERSION_MISMATCH;
402 }
403 if (uBuildType != vmmGetBuildType())
404 {
405 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
406 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
407 return VERR_VMM_R0_VERSION_MISMATCH;
408 }
409
410 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
411 if (RT_FAILURE(rc))
412 return rc;
413
414#ifdef LOG_ENABLED
415 /*
416 * Register the EMT R0 logger instance for VCPU 0.
417 */
418 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
419
420 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
421 if (pR0Logger)
422 {
423# if 0 /* testing of the logger. */
424 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
425 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
426 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
427 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
428
429 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
430 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
431 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
432 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
433
434 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
435 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
436 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
437 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
438
439 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
440 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
441 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
442 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
443 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
444 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
445
446 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
447 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
448
449 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
450 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
451 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
452# endif
453 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pGVM->pSession));
454 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
455 pR0Logger->fRegistered = true;
456 }
457#endif /* LOG_ENABLED */
458
459 /*
460 * Check if the host supports high resolution timers or not.
461 */
462 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
463 && !RTTimerCanDoHighResolution())
464 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
465
466 /*
467 * Initialize the per VM data for GVMM and GMM.
468 */
469 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
470 rc = GVMMR0InitVM(pGVM);
471 if (RT_SUCCESS(rc))
472 {
473 /*
474 * Init HM, CPUM and PGM (Darwin only).
475 */
476 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
477 rc = HMR0InitVM(pGVM);
478 if (RT_SUCCESS(rc))
479 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */
480 if (RT_SUCCESS(rc))
481 {
482 rc = CPUMR0InitVM(pGVM);
483 if (RT_SUCCESS(rc))
484 {
485 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
486 rc = PGMR0InitVM(pGVM);
487 if (RT_SUCCESS(rc))
488 {
489 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
490 rc = EMR0InitVM(pGVM);
491 if (RT_SUCCESS(rc))
492 {
493 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
494#ifdef VBOX_WITH_PCI_PASSTHROUGH
495 rc = PciRawR0InitVM(pGVM);
496#endif
497 if (RT_SUCCESS(rc))
498 {
499 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
500 rc = GIMR0InitVM(pGVM);
501 if (RT_SUCCESS(rc))
502 {
503 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);
504 if (RT_SUCCESS(rc))
505 {
506 GVMMR0DoneInitVM(pGVM);
507
508 /*
509 * Collect a bit of info for the VM release log.
510 */
511 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
512 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
513
514 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
515 return rc;
516 }
517
518 /* bail out*/
519 GIMR0TermVM(pGVM);
520 }
521#ifdef VBOX_WITH_PCI_PASSTHROUGH
522 PciRawR0TermVM(pGVM);
523#endif
524 }
525 }
526 }
527 }
528 HMR0TermVM(pGVM);
529 }
530 }
531
532 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
533 return rc;
534}
535
536
537/**
538 * Does EMT specific VM initialization.
539 *
540 * @returns VBox status code.
541 * @param pGVM The ring-0 VM structure.
542 * @param idCpu The EMT that's calling.
543 */
544static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
545{
546 /* Paranoia (caller checked these already). */
547 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
548 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
549
550#ifdef LOG_ENABLED
551 /*
552 * Registration of ring 0 loggers.
553 */
554 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
555 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
556 if ( pR0Logger
557 && !pR0Logger->fRegistered)
558 {
559 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
560 pR0Logger->fRegistered = true;
561 }
562#endif
563
564 return VINF_SUCCESS;
565}
566
567
568
569/**
570 * Terminates the R0 bits for a particular VM instance.
571 *
572 * This is normally called by ring-3 as part of the VM termination process, but
573 * may alternatively be called during the support driver session cleanup when
574 * the VM object is destroyed (see GVMM).
575 *
576 * @returns VBox status code.
577 *
578 * @param pGVM The global (ring-0) VM structure.
579 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
580 * thread.
581 * @thread EMT(0) or session cleanup thread.
582 */
583VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
584{
585 /*
586 * Check EMT(0) claim if we're called from userland.
587 */
588 if (idCpu != NIL_VMCPUID)
589 {
590 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
591 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
592 if (RT_FAILURE(rc))
593 return rc;
594 }
595
596#ifdef VBOX_WITH_PCI_PASSTHROUGH
597 PciRawR0TermVM(pGVM);
598#endif
599
600 /*
601 * Tell GVMM what we're up to and check that we only do this once.
602 */
603 if (GVMMR0DoingTermVM(pGVM))
604 {
605 GIMR0TermVM(pGVM);
606
607 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
608 * here to make sure we don't leak any shared pages if we crash... */
609#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
610 PGMR0DynMapTermVM(pGVM);
611#endif
612 HMR0TermVM(pGVM);
613 }
614
615 /*
616 * Deregister the logger.
617 */
618 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
619 return VINF_SUCCESS;
620}
621
622
623/**
624 * An interrupt or unhalt force flag is set, deal with it.
625 *
626 * @returns VINF_SUCCESS (or VINF_EM_HALT).
627 * @param pVCpu The cross context virtual CPU structure.
628 * @param uMWait Result from EMMonitorWaitIsActive().
629 * @param enmInterruptibility Guest CPU interruptibility level.
630 */
631static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
632{
633 Assert(!TRPMHasTrap(pVCpu));
634 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
635 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
636
637 /*
638 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
639 */
640 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
641 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
642 {
643 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
644 {
645 uint8_t u8Interrupt = 0;
646 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
647 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
648 if (RT_SUCCESS(rc))
649 {
650 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
651
652 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
653 AssertRCSuccess(rc);
654 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
655 return rc;
656 }
657 }
658 }
659 /*
660 * SMI is not implemented yet, at least not here.
661 */
662 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
663 {
664 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #3\n", pVCpu->idCpu));
665 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
666 return VINF_EM_HALT;
667 }
668 /*
669 * NMI.
670 */
671 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
672 {
673 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
674 {
675 /** @todo later. */
676 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #2 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
677 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
678 return VINF_EM_HALT;
679 }
680 }
681 /*
682 * Nested-guest virtual interrupt.
683 */
684 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
685 {
686 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
687 {
688 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
689 * here before injecting the virtual interrupt. See emR3ForcedActions
690 * for details. */
691 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #1 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
692 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
693 return VINF_EM_HALT;
694 }
695 }
696
697 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
698 {
699 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
700 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (UNHALT)\n", pVCpu->idCpu));
701 return VINF_SUCCESS;
702 }
703 if (uMWait > 1)
704 {
705 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
706 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (uMWait=%u > 1)\n", pVCpu->idCpu, uMWait));
707 return VINF_SUCCESS;
708 }
709
710 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #0 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
711 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
712 return VINF_EM_HALT;
713}
714
715
716/**
717 * This does one round of vmR3HaltGlobal1Halt().
718 *
719 * The rationale here is that we'll reduce latency in interrupt situations if we
720 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
721 * MWAIT), but do one round of blocking here instead and hope the interrupt is
722 * raised in the meanwhile.
723 *
724 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
725 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
726 * ring-0 call (unless we're too close to a timer event). When the interrupt
727 * wakes us up, we'll return from ring-0 and EM will by instinct do a
728 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
729 * back to VMMR0EntryFast().
730 *
731 * @returns VINF_SUCCESS or VINF_EM_HALT.
732 * @param pGVM The ring-0 VM structure.
733 * @param pGVCpu The ring-0 virtual CPU structure.
734 *
735 * @todo r=bird: All the blocking/waiting and EMT management should move out of
736 * the VM module, probably to VMM. Then this would be more weird wrt
737 * parameters and statistics.
738 */
739static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
740{
741 /*
742 * Do spin stat historization.
743 */
744 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
745 { /* likely */ }
746 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
747 {
748 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
749 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
750 }
751 else
752 {
753 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
754 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
755 }
756
757 /*
758 * Flags that make us go to ring-3.
759 */
760 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
761 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
762 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
763 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
764 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
765 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
766 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
767 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
768
769 /*
770 * Check preconditions.
771 */
772 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
773 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
774 if ( pGVCpu->vmm.s.fMayHaltInRing0
775 && !TRPMHasTrap(pGVCpu)
776 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
777 || uMWait > 1))
778 {
779 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
780 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
781 {
782 /*
783 * Interrupts pending already?
784 */
785 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
786 APICUpdatePendingInterrupts(pGVCpu);
787
788 /*
789 * Flags that wake up from the halted state.
790 */
791 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
792 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
793
794 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
795 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
796 ASMNopPause();
797
798 /*
799 * Check out how long till the next timer event.
800 */
801 uint64_t u64Delta;
802 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
803
804 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
805 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
806 {
807 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
808 APICUpdatePendingInterrupts(pGVCpu);
809
810 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
811 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
812
813 /*
814 * Wait if there is enough time to the next timer event.
815 */
816 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
817 {
818 /* If there are few other CPU cores around, we will procrastinate a
819 little before going to sleep, hoping for some device raising an
820 interrupt or similar. Though, the best thing here would be to
821 dynamically adjust the spin count according to its usefulness or
822 something... */
823 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
824 && RTMpGetOnlineCount() >= 4)
825 {
826 /** @todo Figure out how we can skip this if it hasn't helped recently...
827 * @bugref{9172#c12} */
828 uint32_t cSpinLoops = 42;
829 while (cSpinLoops-- > 0)
830 {
831 ASMNopPause();
832 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
833 APICUpdatePendingInterrupts(pGVCpu);
834 ASMNopPause();
835 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
836 {
837 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
838 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
839 return VINF_EM_HALT;
840 }
841 ASMNopPause();
842 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
843 {
844 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
845 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
846 return VINF_EM_HALT;
847 }
848 ASMNopPause();
849 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
850 {
851 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
852 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
853 }
854 ASMNopPause();
855 }
856 }
857
858 /*
859 * We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
860 * knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here).
861 * After changing the state we must recheck the force flags of course.
862 */
863 if (VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
864 {
865 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
866 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
867 {
868 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
869 APICUpdatePendingInterrupts(pGVCpu);
870
871 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
872 {
873 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
874 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
875 }
876
877 /* Okay, block! */
878 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
879 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
880 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
881 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
882 Log10(("vmmR0DoHalt: CPU%d: halted %llu ns\n", pGVCpu->idCpu, cNsElapsedSchedHalt));
883
884 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
885 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
886 if ( rc == VINF_SUCCESS
887 || rc == VERR_INTERRUPTED)
888 {
889 /* Keep some stats like ring-3 does. */
890 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
891 if (cNsOverslept > 50000)
892 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
893 else if (cNsOverslept < -50000)
894 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
895 else
896 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
897
898 /*
899 * Recheck whether we can resume execution or have to go to ring-3.
900 */
901 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
902 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
903 {
904 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
905 APICUpdatePendingInterrupts(pGVCpu);
906 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
907 {
908 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
909 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
910 }
911 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostNoInt);
912 Log12(("vmmR0DoHalt: CPU%d post #2 - No pending interrupt\n", pGVCpu->idCpu));
913 }
914 else
915 {
916 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostPendingFF);
917 Log12(("vmmR0DoHalt: CPU%d post #1 - Pending FF\n", pGVCpu->idCpu));
918 }
919 }
920 else
921 {
922 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
923 Log12(("vmmR0DoHalt: CPU%d GVMMR0SchedHalt failed: %Rrc\n", pGVCpu->idCpu, rc));
924 }
925 }
926 else
927 {
928 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
929 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
930 Log12(("vmmR0DoHalt: CPU%d failed #5 - Pending FF\n", pGVCpu->idCpu));
931 }
932 }
933 else
934 {
935 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
936 Log12(("vmmR0DoHalt: CPU%d failed #4 - enmState=%d\n", pGVCpu->idCpu, VMCPU_GET_STATE(pGVCpu)));
937 }
938 }
939 else
940 {
941 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3SmallDelta);
942 Log12(("vmmR0DoHalt: CPU%d failed #3 - delta too small: %RU64\n", pGVCpu->idCpu, u64Delta));
943 }
944 }
945 else
946 {
947 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
948 Log12(("vmmR0DoHalt: CPU%d failed #2 - Pending FF\n", pGVCpu->idCpu));
949 }
950 }
951 else
952 {
953 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
954 Log12(("vmmR0DoHalt: CPU%d failed #1 - Pending FF\n", pGVCpu->idCpu));
955 }
956 }
957 else
958 {
959 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
960 Log12(("vmmR0DoHalt: CPU%d failed #0 - fMayHaltInRing0=%d TRPMHasTrap=%d enmInt=%d uMWait=%u\n",
961 pGVCpu->idCpu, pGVCpu->vmm.s.fMayHaltInRing0, TRPMHasTrap(pGVCpu), enmInterruptibility, uMWait));
962 }
963
964 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
965 return VINF_EM_HALT;
966}
967
968
969/**
970 * VMM ring-0 thread-context callback.
971 *
972 * This does common HM state updating and calls the HM-specific thread-context
973 * callback.
974 *
975 * This is used together with RTThreadCtxHookCreate() on platforms which
976 * support it, and directly from VMMR0EmtPrepareForBlocking() and
977 * VMMR0EmtResumeAfterBlocking() on platforms which don't.
978 *
979 * @param enmEvent The thread-context event.
980 * @param pvUser Opaque pointer to the VMCPU.
981 *
982 * @thread EMT(pvUser)
983 */
984static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
985{
986 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
987
988 switch (enmEvent)
989 {
990 case RTTHREADCTXEVENT_IN:
991 {
992 /*
993 * Linux may call us with preemption enabled (really!) but technically we
994 * cannot get preempted here, otherwise we end up in an infinite recursion
995 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
996 * ad infinitum). Let's just disable preemption for now...
997 */
998 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
999 * preemption after doing the callout (one or two functions up the
1000 * call chain). */
1001 /** @todo r=ramshankar: See @bugref{5313#c30}. */
1002 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1003 RTThreadPreemptDisable(&ParanoidPreemptState);
1004
1005 /* We need to update the VCPU <-> host CPU mapping. */
1006 RTCPUID idHostCpu;
1007 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1008 pVCpu->vmmr0.s.iHostCpuSet = iHostCpuSet;
1009 ASMAtomicWriteU32(&pVCpu->vmmr0.s.idHostCpu, idHostCpu);
1010 pVCpu->iHostCpuSet = iHostCpuSet;
1011 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1012
1013 /* In the very unlikely event that the GIP delta for the CPU we're
1014 rescheduled onto needs calculating, try to force a return to ring-3.
1015 We unfortunately cannot do the measurements right here. */
1016 if (RT_LIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1017 { /* likely */ }
1018 else
1019 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1020
1021 /* Invoke the HM-specific thread-context callback. */
1022 HMR0ThreadCtxCallback(enmEvent, pvUser);
1023
1024 /* Restore preemption. */
1025 RTThreadPreemptRestore(&ParanoidPreemptState);
1026 break;
1027 }
1028
1029 case RTTHREADCTXEVENT_OUT:
1030 {
1031 /* Invoke the HM-specific thread-context callback. */
1032 HMR0ThreadCtxCallback(enmEvent, pvUser);
1033
1034 /*
1035 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
1036 * have the same host CPU associated with it.
1037 */
1038 pVCpu->vmmr0.s.iHostCpuSet = UINT32_MAX;
1039 ASMAtomicWriteU32(&pVCpu->vmmr0.s.idHostCpu, NIL_RTCPUID);
1040 pVCpu->iHostCpuSet = UINT32_MAX;
1041 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1042 break;
1043 }
1044
1045 default:
1046 /* Invoke the HM-specific thread-context callback. */
1047 HMR0ThreadCtxCallback(enmEvent, pvUser);
1048 break;
1049 }
1050}
1051
1052
1053/**
1054 * Creates thread switching hook for the current EMT thread.
1055 *
1056 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
1057 * platform does not implement switcher hooks, no hooks will be created and the
1058 * member is set to NIL_RTTHREADCTXHOOK.
1059 *
1060 * @returns VBox status code.
1061 * @param pVCpu The cross context virtual CPU structure.
1062 * @thread EMT(pVCpu)
1063 */
1064VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
1065{
1066 VMCPU_ASSERT_EMT(pVCpu);
1067 Assert(pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK);
1068
1069#if 1 /* To disable this stuff change to zero. */
1070 int rc = RTThreadCtxHookCreate(&pVCpu->vmmr0.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
1071 if (RT_SUCCESS(rc))
1072 {
1073 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = true;
1074 return rc;
1075 }
1076#else
1077 RT_NOREF(vmmR0ThreadCtxCallback);
1078 int rc = VERR_NOT_SUPPORTED;
1079#endif
1080
1081 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1082 pVCpu->pGVM->vmm.s.fIsUsingContextHooks = false;
1083 if (rc == VERR_NOT_SUPPORTED)
1084 return VINF_SUCCESS;
1085
1086 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
1087 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
1088}
1089
1090
1091/**
1092 * Destroys the thread switching hook for the specified VCPU.
1093 *
1094 * @param pVCpu The cross context virtual CPU structure.
1095 * @remarks Can be called from any thread.
1096 */
1097VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
1098{
1099 int rc = RTThreadCtxHookDestroy(pVCpu->vmmr0.s.hCtxHook);
1100 AssertRC(rc);
1101 pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1102}
1103
1104
1105/**
1106 * Disables the thread switching hook for this VCPU (if we got one).
1107 *
1108 * @param pVCpu The cross context virtual CPU structure.
1109 * @thread EMT(pVCpu)
1110 *
1111 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
1112 * this call. This means you have to be careful with what you do!
1113 */
1114VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1115{
1116 /*
1117 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1118 * @bugref{7726#c19} explains the need for this trick:
1119 *
1120 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1121 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
1122 * longjmp & normal return to ring-3, which opens a window where we may be
1123 * rescheduled without changing VMCPUID::idHostCpu and cause confusion if
1124 * the CPU starts executing a different EMT. Both functions first disable
1125 * preemption and then call HMR0LeaveCpu which invalidates idHostCpu, leaving
1126 * an opening for getting preempted.
1127 */
1128 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1129 * all the time. */
1130 /** @todo move this into the context hook disabling if(). */
1131 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1132
1133 /*
1134 * Disable the context hook, if we got one.
1135 */
1136 if (pVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1137 {
1138 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1139 ASMAtomicWriteU32(&pVCpu->vmmr0.s.idHostCpu, NIL_RTCPUID);
1140 int rc = RTThreadCtxHookDisable(pVCpu->vmmr0.s.hCtxHook);
1141 AssertRC(rc);
1142 }
1143}
1144
1145
1146/**
1147 * Internal version of VMMR0ThreadCtxHookIsEnabled.
1148 *
1149 * @returns true if registered, false otherwise.
1150 * @param pVCpu The cross context virtual CPU structure.
1151 */
1152DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1153{
1154 return RTThreadCtxHookIsEnabled(pVCpu->vmmr0.s.hCtxHook);
1155}
1156
1157
1158/**
1159 * Whether thread-context hooks are registered for this VCPU.
1160 *
1161 * @returns true if registered, false otherwise.
1162 * @param pVCpu The cross context virtual CPU structure.
1163 */
1164VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1165{
1166 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1167}
1168
1169
1170/**
1171 * Returns the ring-0 release logger instance.
1172 *
1173 * @returns Pointer to release logger, NULL if not configured.
1174 * @param pVCpu The cross context virtual CPU structure of the caller.
1175 * @thread EMT(pVCpu)
1176 */
1177VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu)
1178{
1179 PVMMR0LOGGER pLogger = pVCpu->vmm.s.pR0RelLoggerR0;
1180 if (pLogger)
1181 return &pLogger->Logger;
1182 return NULL;
1183}
1184
1185
1186#ifdef VBOX_WITH_STATISTICS
1187/**
1188 * Record return code statistics
1189 * @param pVM The cross context VM structure.
1190 * @param pVCpu The cross context virtual CPU structure.
1191 * @param rc The status code.
1192 */
1193static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1194{
1195 /*
1196 * Collect statistics.
1197 */
1198 switch (rc)
1199 {
1200 case VINF_SUCCESS:
1201 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1202 break;
1203 case VINF_EM_RAW_INTERRUPT:
1204 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1205 break;
1206 case VINF_EM_RAW_INTERRUPT_HYPER:
1207 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1208 break;
1209 case VINF_EM_RAW_GUEST_TRAP:
1210 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1211 break;
1212 case VINF_EM_RAW_RING_SWITCH:
1213 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1214 break;
1215 case VINF_EM_RAW_RING_SWITCH_INT:
1216 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1217 break;
1218 case VINF_EM_RAW_STALE_SELECTOR:
1219 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1220 break;
1221 case VINF_EM_RAW_IRET_TRAP:
1222 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1223 break;
1224 case VINF_IOM_R3_IOPORT_READ:
1225 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1226 break;
1227 case VINF_IOM_R3_IOPORT_WRITE:
1228 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1229 break;
1230 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1231 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1232 break;
1233 case VINF_IOM_R3_MMIO_READ:
1234 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1235 break;
1236 case VINF_IOM_R3_MMIO_WRITE:
1237 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1238 break;
1239 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1240 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1241 break;
1242 case VINF_IOM_R3_MMIO_READ_WRITE:
1243 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1244 break;
1245 case VINF_PATM_HC_MMIO_PATCH_READ:
1246 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1247 break;
1248 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1249 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1250 break;
1251 case VINF_CPUM_R3_MSR_READ:
1252 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1253 break;
1254 case VINF_CPUM_R3_MSR_WRITE:
1255 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1256 break;
1257 case VINF_EM_RAW_EMULATE_INSTR:
1258 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1259 break;
1260 case VINF_PATCH_EMULATE_INSTR:
1261 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1262 break;
1263 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1264 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1265 break;
1266 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1267 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1268 break;
1269 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1270 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1271 break;
1272 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1273 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1274 break;
1275 case VINF_CSAM_PENDING_ACTION:
1276 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1277 break;
1278 case VINF_PGM_SYNC_CR3:
1279 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1280 break;
1281 case VINF_PATM_PATCH_INT3:
1282 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1283 break;
1284 case VINF_PATM_PATCH_TRAP_PF:
1285 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1286 break;
1287 case VINF_PATM_PATCH_TRAP_GP:
1288 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1289 break;
1290 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1291 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1292 break;
1293 case VINF_EM_RESCHEDULE_REM:
1294 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1295 break;
1296 case VINF_EM_RAW_TO_R3:
1297 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1298 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1299 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1300 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1301 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1302 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1303 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1304 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1305 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1306 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1307 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1308 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1309 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1310 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1311 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1312 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1313 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1314 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1315 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1316 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1317 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1318 else
1319 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1320 break;
1321
1322 case VINF_EM_RAW_TIMER_PENDING:
1323 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1324 break;
1325 case VINF_EM_RAW_INTERRUPT_PENDING:
1326 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1327 break;
1328 case VINF_VMM_CALL_HOST:
1329 switch (pVCpu->vmm.s.enmCallRing3Operation)
1330 {
1331 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1332 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1333 break;
1334 case VMMCALLRING3_PDM_LOCK:
1335 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1336 break;
1337 case VMMCALLRING3_PGM_POOL_GROW:
1338 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1339 break;
1340 case VMMCALLRING3_PGM_LOCK:
1341 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1342 break;
1343 case VMMCALLRING3_PGM_MAP_CHUNK:
1344 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1345 break;
1346 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1347 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1348 break;
1349 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1350 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1351 break;
1352 case VMMCALLRING3_VM_SET_ERROR:
1353 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1354 break;
1355 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1356 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1357 break;
1358 case VMMCALLRING3_VM_R0_ASSERTION:
1359 default:
1360 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1361 break;
1362 }
1363 break;
1364 case VINF_PATM_DUPLICATE_FUNCTION:
1365 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1366 break;
1367 case VINF_PGM_CHANGE_MODE:
1368 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1369 break;
1370 case VINF_PGM_POOL_FLUSH_PENDING:
1371 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1372 break;
1373 case VINF_EM_PENDING_REQUEST:
1374 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1375 break;
1376 case VINF_EM_HM_PATCH_TPR_INSTR:
1377 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1378 break;
1379 default:
1380 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1381 break;
1382 }
1383}
1384#endif /* VBOX_WITH_STATISTICS */
1385
1386
1387/**
1388 * The Ring 0 entry point, called by the fast-ioctl path.
1389 *
1390 * @param pGVM The global (ring-0) VM structure.
1391 * @param pVMIgnored The cross context VM structure. The return code is
1392 * stored in pVM->vmm.s.iLastGZRc.
1393 * @param idCpu The Virtual CPU ID of the calling EMT.
1394 * @param enmOperation Which operation to execute.
1395 * @remarks Assume called with interrupts _enabled_.
1396 */
1397VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1398{
1399 RT_NOREF(pVMIgnored);
1400
1401 /*
1402 * Validation.
1403 */
1404 if ( idCpu < pGVM->cCpus
1405 && pGVM->cCpus == pGVM->cCpusUnsafe)
1406 { /*likely*/ }
1407 else
1408 {
1409 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1410 return;
1411 }
1412
1413 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1414 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1415 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1416 && pGVCpu->hNativeThreadR0 == hNativeThread))
1417 { /* likely */ }
1418 else
1419 {
1420 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1421 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1422 return;
1423 }
1424
1425 /*
1426 * SMAP fun.
1427 */
1428 VMM_CHECK_SMAP_SETUP();
1429 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1430
1431 /*
1432 * Perform requested operation.
1433 */
1434 switch (enmOperation)
1435 {
1436 /*
1437 * Run guest code using the available hardware acceleration technology.
1438 */
1439 case VMMR0_DO_HM_RUN:
1440 {
1441 for (;;) /* hlt loop */
1442 {
1443 /*
1444 * Disable preemption.
1445 */
1446 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1447 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1448 RTThreadPreemptDisable(&PreemptState);
1449 pGVCpu->vmmr0.s.pPreemptState = &PreemptState;
1450
1451 /*
1452 * Get the host CPU identifiers, make sure they are valid and that
1453 * we've got a TSC delta for the CPU.
1454 */
1455 RTCPUID idHostCpu;
1456 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1457 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1458 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1459 {
1460 pGVCpu->vmmr0.s.iHostCpuSet = iHostCpuSet;
1461 ASMAtomicWriteU32(&pGVCpu->vmmr0.s.idHostCpu, idHostCpu);
1462
1463 pGVCpu->iHostCpuSet = iHostCpuSet;
1464 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1465
1466 /*
1467 * Update the periodic preemption timer if it's active.
1468 */
1469 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1470 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1471 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1472
1473#ifdef VMM_R0_TOUCH_FPU
1474 /*
1475 * Make sure we've got the FPU state loaded so we don't need to clear
1476 * CR0.TS and get out of sync with the host kernel when loading the guest
1477 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1478 */
1479 CPUMR0TouchHostFpu();
1480#endif
1481 int rc;
1482 bool fPreemptRestored = false;
1483 if (!HMR0SuspendPending())
1484 {
1485 /*
1486 * Enable the context switching hook.
1487 */
1488 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1489 {
1490 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmmr0.s.hCtxHook));
1491 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmmr0.s.hCtxHook); AssertRC(rc2);
1492 }
1493
1494 /*
1495 * Enter HM context.
1496 */
1497 rc = HMR0Enter(pGVCpu);
1498 if (RT_SUCCESS(rc))
1499 {
1500 pGVCpu->vmmr0.s.fInHmContext = true;
1501 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1502
1503 /*
1504 * When preemption hooks are in place, enable preemption now that
1505 * we're in HM context.
1506 */
1507 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1508 {
1509 fPreemptRestored = true;
1510 pGVCpu->vmmr0.s.pPreemptState = NULL;
1511 RTThreadPreemptRestore(&PreemptState);
1512 }
1513
1514 /*
1515 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1516 */
1517 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1518 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
1519 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1520
1521 /*
1522 * Assert sanity on the way out. Using manual assertion code here as normal
1523 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1524 */
1525 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1526 && RT_SUCCESS_NP(rc)
1527 && rc != VINF_VMM_CALL_HOST ))
1528 {
1529 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1530 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1531 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1532 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1533 }
1534#if 0
1535 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1536 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1537 {
1538 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1539 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1540 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1541 rc = VERR_VMM_CONTEXT_HOOK_STILL_ENABLED;
1542 }
1543#endif
1544
1545 pGVCpu->vmmr0.s.fInHmContext = false;
1546 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1547 }
1548 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1549
1550 /*
1551 * Invalidate the host CPU identifiers before we disable the context
1552 * hook / restore preemption.
1553 */
1554 pGVCpu->vmmr0.s.iHostCpuSet = UINT32_MAX;
1555 ASMAtomicWriteU32(&pGVCpu->vmmr0.s.idHostCpu, NIL_RTCPUID);
1556
1557 pGVCpu->iHostCpuSet = UINT32_MAX;
1558 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1559
1560 /*
1561 * Disable context hooks. Due to unresolved cleanup issues, we
1562 * cannot leave the hooks enabled when we return to ring-3.
1563 *
1564 * Note! At the moment HM may also have disabled the hook
1565 * when we get here, but the IPRT API handles that.
1566 */
1567 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1568 {
1569 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1570 RTThreadCtxHookDisable(pGVCpu->vmmr0.s.hCtxHook);
1571 }
1572 }
1573 /*
1574 * The system is about to go into suspend mode; go back to ring 3.
1575 */
1576 else
1577 {
1578 rc = VINF_EM_RAW_INTERRUPT;
1579 pGVCpu->iHostCpuSet = UINT32_MAX;
1580 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1581 }
1582
1583 /** @todo When HM stops messing with the context hook state, we'll disable
1584 * preemption again before the RTThreadCtxHookDisable call. */
1585 if (!fPreemptRestored)
1586 {
1587 pGVCpu->vmmr0.s.pPreemptState = NULL;
1588 RTThreadPreemptRestore(&PreemptState);
1589 }
1590
1591 pGVCpu->vmm.s.iLastGZRc = rc;
1592
1593 /* Fire dtrace probe and collect statistics. */
1594 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1595#ifdef VBOX_WITH_STATISTICS
1596 vmmR0RecordRC(pGVM, pGVCpu, rc);
1597#endif
1598 /*
1599 * If this is a halt, let vmmR0DoHalt try to handle it in ring-0.
1600 */
1601 if (rc != VINF_EM_HALT)
1602 { /* we're not in a hurry for a HLT, so prefer this path */ }
1603 else
1604 {
1605 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1606 if (rc == VINF_SUCCESS)
1607 {
1608 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1609 continue;
1610 }
1611 pGVCpu->vmm.s.cR0HaltsToRing3++;
1612 }
1613 }
1614 /*
1615 * Invalid CPU set index or TSC delta in need of measuring.
1616 */
1617 else
1618 {
1619 pGVCpu->vmmr0.s.pPreemptState = NULL;
1620 pGVCpu->vmmr0.s.iHostCpuSet = UINT32_MAX;
1621 ASMAtomicWriteU32(&pGVCpu->vmmr0.s.idHostCpu, NIL_RTCPUID);
1622 pGVCpu->iHostCpuSet = UINT32_MAX;
1623 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1624 RTThreadPreemptRestore(&PreemptState);
1625 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1626 {
1627 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1628 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1629 0 /*default cTries*/);
1630 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1631 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1632 else
1633 pGVCpu->vmm.s.iLastGZRc = rc;
1634 }
1635 else
1636 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1637 }
1638 break;
1639
1640 } /* halt loop. */
1641 break;
1642 }
1643
1644#ifdef VBOX_WITH_NEM_R0
1645# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1646 case VMMR0_DO_NEM_RUN:
1647 {
1648 /*
1649 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1650 */
1651 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1652# ifdef VBOXSTRICTRC_STRICT_ENABLED
1653 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1654# else
1655 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1656# endif
1657 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1658 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1659
1660 pGVCpu->vmm.s.iLastGZRc = rc;
1661
1662 /*
1663 * Fire dtrace probe and collect statistics.
1664 */
1665 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1666# ifdef VBOX_WITH_STATISTICS
1667 vmmR0RecordRC(pGVM, pGVCpu, rc);
1668# endif
1669 break;
1670 }
1671# endif
1672#endif
1673
1674 /*
1675 * For profiling.
1676 */
1677 case VMMR0_DO_NOP:
1678 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1679 break;
1680
1681 /*
1682 * Shouldn't happen.
1683 */
1684 default:
1685 AssertMsgFailed(("%#x\n", enmOperation));
1686 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1687 break;
1688 }
1689 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1690}
1691
1692
1693/**
1694 * Validates a session or VM session argument.
1695 *
1696 * @returns true / false accordingly.
1697 * @param pGVM The global (ring-0) VM structure.
1698 * @param pClaimedSession The session claim to validate.
1699 * @param pSession The session argument.
1700 */
1701DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1702{
1703 /* This must be set! */
1704 if (!pSession)
1705 return false;
1706
1707 /* Only one out of the two. */
1708 if (pGVM && pClaimedSession)
1709 return false;
1710 if (pGVM)
1711 pClaimedSession = pGVM->pSession;
1712 return pClaimedSession == pSession;
1713}
1714
1715
1716/**
1717 * VMMR0EntryEx worker function, either called directly or whenever possible
1718 * called thru a longjmp so we can exit safely on failure.
1719 *
1720 * @returns VBox status code.
1721 * @param pGVM The global (ring-0) VM structure.
1722 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pGVM
1723 *                          is NULL, and may be NIL_VMCPUID if it isn't.
1724 * @param enmOperation Which operation to execute.
1725 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1726 * The support driver validates this if it's present.
1727 * @param u64Arg Some simple constant argument.
1728 * @param pSession The session of the caller.
1729 *
1730 * @remarks Assume called with interrupts _enabled_.
1731 */
1732DECL_NO_INLINE(static, int) vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1733 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1734{
1735 /*
1736 * Validate pGVM and idCpu for consistency and validity.
1737 */
1738 if (pGVM != NULL)
1739 {
1740 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
1741 { /* likely */ }
1742 else
1743 {
1744 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1745 return VERR_INVALID_POINTER;
1746 }
1747
1748 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1749 { /* likely */ }
1750 else
1751 {
1752 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1753 return VERR_INVALID_PARAMETER;
1754 }
1755
1756 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1757 && pGVM->enmVMState <= VMSTATE_TERMINATED
1758 && pGVM->pSession == pSession
1759 && pGVM->pSelf == pGVM))
1760 { /* likely */ }
1761 else
1762 {
1763 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1764 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1765 return VERR_INVALID_POINTER;
1766 }
1767 }
1768 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1769 { /* likely */ }
1770 else
1771 {
1772 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1773 return VERR_INVALID_PARAMETER;
1774 }
1775
1776 /*
1777 * SMAP fun.
1778 */
1779 VMM_CHECK_SMAP_SETUP();
1780 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1781
1782 /*
1783 * Process the request.
1784 */
1785 int rc;
1786 switch (enmOperation)
1787 {
1788 /*
1789 * GVM requests
1790 */
1791 case VMMR0_DO_GVMM_CREATE_VM:
1792 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1793 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1794 else
1795 rc = VERR_INVALID_PARAMETER;
1796 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1797 break;
1798
1799 case VMMR0_DO_GVMM_DESTROY_VM:
1800 if (pReqHdr == NULL && u64Arg == 0)
1801 rc = GVMMR0DestroyVM(pGVM);
1802 else
1803 rc = VERR_INVALID_PARAMETER;
1804 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1805 break;
1806
1807 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1808 if (pGVM != NULL)
1809 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1810 else
1811 rc = VERR_INVALID_PARAMETER;
1812 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1813 break;
1814
1815 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1816 if (pGVM != NULL)
1817 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1818 else
1819 rc = VERR_INVALID_PARAMETER;
1820 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1821 break;
1822
1823 case VMMR0_DO_GVMM_SCHED_HALT:
1824 if (pReqHdr)
1825 return VERR_INVALID_PARAMETER;
1826 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1827 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1828 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1829 break;
1830
1831 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1832 if (pReqHdr || u64Arg)
1833 return VERR_INVALID_PARAMETER;
1834 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1835 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1836 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1837 break;
1838
1839 case VMMR0_DO_GVMM_SCHED_POKE:
1840 if (pReqHdr || u64Arg)
1841 return VERR_INVALID_PARAMETER;
1842 rc = GVMMR0SchedPoke(pGVM, idCpu);
1843 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1844 break;
1845
1846 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1847 if (u64Arg)
1848 return VERR_INVALID_PARAMETER;
1849 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1850 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1851 break;
1852
1853 case VMMR0_DO_GVMM_SCHED_POLL:
1854 if (pReqHdr || u64Arg > 1)
1855 return VERR_INVALID_PARAMETER;
1856 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1857 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1858 break;
1859
1860 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1861 if (u64Arg)
1862 return VERR_INVALID_PARAMETER;
1863 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1864 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1865 break;
1866
1867 case VMMR0_DO_GVMM_RESET_STATISTICS:
1868 if (u64Arg)
1869 return VERR_INVALID_PARAMETER;
1870 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1871 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1872 break;
1873
1874 /*
1875 * Initialize the R0 part of a VM instance.
1876 */
1877 case VMMR0_DO_VMMR0_INIT:
1878 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1879 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1880 break;
1881
1882 /*
1883 * Does EMT specific ring-0 init.
1884 */
1885 case VMMR0_DO_VMMR0_INIT_EMT:
1886 rc = vmmR0InitVMEmt(pGVM, idCpu);
1887 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1888 break;
1889
1890 /*
1891 * Terminate the R0 part of a VM instance.
1892 */
1893 case VMMR0_DO_VMMR0_TERM:
1894 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1895 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1896 break;
1897
1898 /*
1899 * Attempt to enable HM mode and check the current setting.
1900 */
1901 case VMMR0_DO_HM_ENABLE:
1902 rc = HMR0EnableAllCpus(pGVM);
1903 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1904 break;
1905
1906 /*
1907 * Setup the hardware accelerated session.
1908 */
1909 case VMMR0_DO_HM_SETUP_VM:
1910 rc = HMR0SetupVM(pGVM);
1911 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1912 break;
1913
1914 /*
1915 * PGM wrappers.
1916 */
1917 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1918 if (idCpu == NIL_VMCPUID)
1919 return VERR_INVALID_CPU_ID;
1920 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
1921 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1922 break;
1923
1924 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1925 if (idCpu == NIL_VMCPUID)
1926 return VERR_INVALID_CPU_ID;
1927 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
1928 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1929 break;
1930
1931 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1932 if (idCpu == NIL_VMCPUID)
1933 return VERR_INVALID_CPU_ID;
1934 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, idCpu);
1935 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1936 break;
1937
1938 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1939 if (idCpu != 0)
1940 return VERR_INVALID_CPU_ID;
1941 rc = PGMR0PhysSetupIoMmu(pGVM);
1942 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1943 break;
1944
1945 case VMMR0_DO_PGM_POOL_GROW:
1946 if (idCpu == NIL_VMCPUID)
1947 return VERR_INVALID_CPU_ID;
1948 rc = PGMR0PoolGrow(pGVM);
1949 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1950 break;
1951
1952 /*
1953 * GMM wrappers.
1954 */
1955 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1956 if (u64Arg)
1957 return VERR_INVALID_PARAMETER;
1958 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1959 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1960 break;
1961
1962 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1963 if (u64Arg)
1964 return VERR_INVALID_PARAMETER;
1965 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1966 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1967 break;
1968
1969 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1970 if (u64Arg)
1971 return VERR_INVALID_PARAMETER;
1972 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1973 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1974 break;
1975
1976 case VMMR0_DO_GMM_FREE_PAGES:
1977 if (u64Arg)
1978 return VERR_INVALID_PARAMETER;
1979 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1980 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1981 break;
1982
1983 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1984 if (u64Arg)
1985 return VERR_INVALID_PARAMETER;
1986 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1987 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1988 break;
1989
1990 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1991 if (u64Arg)
1992 return VERR_INVALID_PARAMETER;
1993 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1994 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1995 break;
1996
1997 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1998 if (idCpu == NIL_VMCPUID)
1999 return VERR_INVALID_CPU_ID;
2000 if (u64Arg)
2001 return VERR_INVALID_PARAMETER;
2002 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
2003 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2004 break;
2005
2006 case VMMR0_DO_GMM_BALLOONED_PAGES:
2007 if (u64Arg)
2008 return VERR_INVALID_PARAMETER;
2009 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
2010 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2011 break;
2012
2013 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
2014 if (u64Arg)
2015 return VERR_INVALID_PARAMETER;
2016 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
2017 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2018 break;
2019
2020 case VMMR0_DO_GMM_SEED_CHUNK:
2021 if (pReqHdr)
2022 return VERR_INVALID_PARAMETER;
2023 rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
2024 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2025 break;
2026
2027 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
2028 if (idCpu == NIL_VMCPUID)
2029 return VERR_INVALID_CPU_ID;
2030 if (u64Arg)
2031 return VERR_INVALID_PARAMETER;
2032 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
2033 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2034 break;
2035
2036 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
2037 if (idCpu == NIL_VMCPUID)
2038 return VERR_INVALID_CPU_ID;
2039 if (u64Arg)
2040 return VERR_INVALID_PARAMETER;
2041 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
2042 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2043 break;
2044
2045 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
2046 if (idCpu == NIL_VMCPUID)
2047 return VERR_INVALID_CPU_ID;
2048 if ( u64Arg
2049 || pReqHdr)
2050 return VERR_INVALID_PARAMETER;
2051 rc = GMMR0ResetSharedModules(pGVM, idCpu);
2052 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2053 break;
2054
2055#ifdef VBOX_WITH_PAGE_SHARING
2056 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
2057 {
2058 if (idCpu == NIL_VMCPUID)
2059 return VERR_INVALID_CPU_ID;
2060 if ( u64Arg
2061 || pReqHdr)
2062 return VERR_INVALID_PARAMETER;
2063 rc = GMMR0CheckSharedModules(pGVM, idCpu);
2064 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2065 break;
2066 }
2067#endif
2068
2069#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
2070 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
2071 if (u64Arg)
2072 return VERR_INVALID_PARAMETER;
2073 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
2074 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2075 break;
2076#endif
2077
2078 case VMMR0_DO_GMM_QUERY_STATISTICS:
2079 if (u64Arg)
2080 return VERR_INVALID_PARAMETER;
2081 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
2082 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2083 break;
2084
2085 case VMMR0_DO_GMM_RESET_STATISTICS:
2086 if (u64Arg)
2087 return VERR_INVALID_PARAMETER;
2088 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
2089 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2090 break;
2091
2092 /*
2093 * A quick GCFGM mock-up.
2094 */
2095 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
2096 case VMMR0_DO_GCFGM_SET_VALUE:
2097 case VMMR0_DO_GCFGM_QUERY_VALUE:
2098 {
2099 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2100 return VERR_INVALID_PARAMETER;
2101 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
2102 if (pReq->Hdr.cbReq != sizeof(*pReq))
2103 return VERR_INVALID_PARAMETER;
2104 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
2105 {
2106 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2107 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2108 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2109 }
2110 else
2111 {
2112 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2113 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2114 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2115 }
2116 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2117 break;
2118 }
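        /* Ring-3 usage sketch (illustrative only; the value name is hypothetical and the
         * request layout follows the field accesses above):
         *
         *     GCFGMVALUEREQ Req;
         *     RT_ZERO(Req);
         *     Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
         *     Req.Hdr.cbReq    = sizeof(Req);
         *     Req.pSession     = pSession;
         *     RTStrCopy(Req.szName, sizeof(Req.szName), "/GVMM/SomeValue");
         *     // u64Arg must be zero and idCpu NIL_VMCPUID for GCFGM requests (checked above).
         *     int rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GCFGM_QUERY_VALUE,
         *                               0, &Req.Hdr);
         *     // On success, Req.u64Value holds the queried value.
         */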
2119
2120 /*
2121 * PDM Wrappers.
2122 */
2123 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2124 {
2125 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2126 return VERR_INVALID_PARAMETER;
2127 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2128 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2129 break;
2130 }
2131
2132 case VMMR0_DO_PDM_DEVICE_CREATE:
2133 {
2134 if (!pReqHdr || u64Arg || idCpu != 0)
2135 return VERR_INVALID_PARAMETER;
2136 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
2137 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2138 break;
2139 }
2140
2141 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2142 {
2143 if (!pReqHdr || u64Arg)
2144 return VERR_INVALID_PARAMETER;
2145 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2146 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2147 break;
2148 }
2149
2150        /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2151 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2152 {
2153 if (!pReqHdr || u64Arg || idCpu != 0)
2154 return VERR_INVALID_PARAMETER;
2155 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2156 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2157 break;
2158 }
2159
2160 /*
2161 * Requests to the internal networking service.
2162 */
2163 case VMMR0_DO_INTNET_OPEN:
2164 {
2165 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2166 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2167 return VERR_INVALID_PARAMETER;
2168 rc = IntNetR0OpenReq(pSession, pReq);
2169 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2170 break;
2171 }
2172
2173 case VMMR0_DO_INTNET_IF_CLOSE:
2174 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2175 return VERR_INVALID_PARAMETER;
2176 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2177 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2178 break;
2179
2180
2181 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2182 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2183 return VERR_INVALID_PARAMETER;
2184 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2185 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2186 break;
2187
2188 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2189 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2190 return VERR_INVALID_PARAMETER;
2191 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2192 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2193 break;
2194
2195 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2196 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2197 return VERR_INVALID_PARAMETER;
2198 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2199 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2200 break;
2201
2202 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2203 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2204 return VERR_INVALID_PARAMETER;
2205 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2206 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2207 break;
2208
2209 case VMMR0_DO_INTNET_IF_SEND:
2210 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2211 return VERR_INVALID_PARAMETER;
2212 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2213 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2214 break;
2215
2216 case VMMR0_DO_INTNET_IF_WAIT:
2217 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2218 return VERR_INVALID_PARAMETER;
2219 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2220 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2221 break;
2222
2223 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2224 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2225 return VERR_INVALID_PARAMETER;
2226 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2227 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2228 break;
2229
2230#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2231 /*
2232 * Requests to host PCI driver service.
2233 */
2234 case VMMR0_DO_PCIRAW_REQ:
2235 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2236 return VERR_INVALID_PARAMETER;
2237 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2238 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2239 break;
2240#endif
2241
2242 /*
2243 * NEM requests.
2244 */
2245#ifdef VBOX_WITH_NEM_R0
2246# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2247 case VMMR0_DO_NEM_INIT_VM:
2248 if (u64Arg || pReqHdr || idCpu != 0)
2249 return VERR_INVALID_PARAMETER;
2250 rc = NEMR0InitVM(pGVM);
2251 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2252 break;
2253
2254 case VMMR0_DO_NEM_INIT_VM_PART_2:
2255 if (u64Arg || pReqHdr || idCpu != 0)
2256 return VERR_INVALID_PARAMETER;
2257 rc = NEMR0InitVMPart2(pGVM);
2258 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2259 break;
2260
2261 case VMMR0_DO_NEM_MAP_PAGES:
2262 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2263 return VERR_INVALID_PARAMETER;
2264 rc = NEMR0MapPages(pGVM, idCpu);
2265 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2266 break;
2267
2268 case VMMR0_DO_NEM_UNMAP_PAGES:
2269 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2270 return VERR_INVALID_PARAMETER;
2271 rc = NEMR0UnmapPages(pGVM, idCpu);
2272 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2273 break;
2274
2275 case VMMR0_DO_NEM_EXPORT_STATE:
2276 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2277 return VERR_INVALID_PARAMETER;
2278 rc = NEMR0ExportState(pGVM, idCpu);
2279 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2280 break;
2281
2282 case VMMR0_DO_NEM_IMPORT_STATE:
2283 if (pReqHdr || idCpu == NIL_VMCPUID)
2284 return VERR_INVALID_PARAMETER;
2285 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2286 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2287 break;
2288
2289 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2290 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2291 return VERR_INVALID_PARAMETER;
2292 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2293 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2294 break;
2295
2296 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2297 if (pReqHdr || idCpu == NIL_VMCPUID)
2298 return VERR_INVALID_PARAMETER;
2299 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2300 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2301 break;
2302
2303 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2304 if (u64Arg || pReqHdr)
2305 return VERR_INVALID_PARAMETER;
2306 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2307 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2308 break;
2309
2310# if 1 && defined(DEBUG_bird)
2311 case VMMR0_DO_NEM_EXPERIMENT:
2312 if (pReqHdr)
2313 return VERR_INVALID_PARAMETER;
2314 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2315 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2316 break;
2317# endif
2318# endif
2319#endif
2320
2321 /*
2322 * IOM requests.
2323 */
2324 case VMMR0_DO_IOM_GROW_IO_PORTS:
2325 {
2326 if (pReqHdr || idCpu != 0)
2327 return VERR_INVALID_PARAMETER;
2328 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2329 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2330 break;
2331 }
2332
2333 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2334 {
2335 if (pReqHdr || idCpu != 0)
2336 return VERR_INVALID_PARAMETER;
2337 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2338 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2339 break;
2340 }
2341
2342 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2343 {
2344 if (pReqHdr || idCpu != 0)
2345 return VERR_INVALID_PARAMETER;
2346 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2347 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2348 break;
2349 }
2350
2351 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2352 {
2353 if (pReqHdr || idCpu != 0)
2354 return VERR_INVALID_PARAMETER;
2355 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2356 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2357 break;
2358 }
2359
2360 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2361 {
2362 if (pReqHdr || idCpu != 0)
2363 return VERR_INVALID_PARAMETER;
2364 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2365 if (RT_SUCCESS(rc))
2366 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2367 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2368 break;
2369 }
2370
2371 /*
2372 * DBGF requests.
2373 */
2374#ifdef VBOX_WITH_DBGF_TRACING
2375 case VMMR0_DO_DBGF_TRACER_CREATE:
2376 {
2377 if (!pReqHdr || u64Arg || idCpu != 0)
2378 return VERR_INVALID_PARAMETER;
2379 rc = DBGFR0TracerCreateReqHandler(pGVM, (PDBGFTRACERCREATEREQ)pReqHdr);
2380 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2381 break;
2382 }
2383
2384 case VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER:
2385 {
2386 if (!pReqHdr || u64Arg)
2387 return VERR_INVALID_PARAMETER;
2388# if 0 /** @todo */
2389 rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
2390# else
2391 rc = VERR_NOT_IMPLEMENTED;
2392# endif
2393 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2394 break;
2395 }
2396#endif
2397
2398 case VMMR0_DO_DBGF_BP_INIT:
2399 {
2400 if (!pReqHdr || u64Arg || idCpu != 0)
2401 return VERR_INVALID_PARAMETER;
2402 rc = DBGFR0BpInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2403 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2404 break;
2405 }
2406
2407 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2408 {
2409 if (!pReqHdr || u64Arg || idCpu != 0)
2410 return VERR_INVALID_PARAMETER;
2411 rc = DBGFR0BpChunkAllocReqHandler(pGVM, (PDBGFBPCHUNKALLOCREQ)pReqHdr);
2412 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2413 break;
2414 }
2415
2416 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2417 {
2418 if (!pReqHdr || u64Arg || idCpu != 0)
2419 return VERR_INVALID_PARAMETER;
2420 rc = DBGFR0BpL2TblChunkAllocReqHandler(pGVM, (PDBGFBPL2TBLCHUNKALLOCREQ)pReqHdr);
2421 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2422 break;
2423 }
2424
2425 case VMMR0_DO_DBGF_BP_OWNER_INIT:
2426 {
2427 if (!pReqHdr || u64Arg || idCpu != 0)
2428 return VERR_INVALID_PARAMETER;
2429 rc = DBGFR0BpOwnerInitReqHandler(pGVM, (PDBGFBPOWNERINITREQ)pReqHdr);
2430 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2431 break;
2432 }
2433
2434 case VMMR0_DO_DBGF_BP_PORTIO_INIT:
2435 {
2436 if (!pReqHdr || u64Arg || idCpu != 0)
2437 return VERR_INVALID_PARAMETER;
2438 rc = DBGFR0BpPortIoInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2439 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2440 break;
2441 }
2442
2443
2444 /*
2445 * TM requests.
2446 */
2447 case VMMR0_DO_TM_GROW_TIMER_QUEUE:
2448 {
2449 if (pReqHdr || idCpu == NIL_VMCPUID)
2450 return VERR_INVALID_PARAMETER;
2451 rc = TMR0TimerQueueGrow(pGVM, RT_HI_U32(u64Arg), RT_LO_U32(u64Arg));
2452 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2453 break;
2454 }
2455
2456 /*
2457 * For profiling.
2458 */
2459 case VMMR0_DO_NOP:
2460 case VMMR0_DO_SLOW_NOP:
2461 return VINF_SUCCESS;
2462
2463 /*
2464 * For testing Ring-0 APIs invoked in this environment.
2465 */
2466 case VMMR0_DO_TESTS:
2467 /** @todo make new test */
2468 return VINF_SUCCESS;
2469
2470 default:
2471 /*
2472             * We're returning VERR_NOT_SUPPORTED here so we've got something other
2473             * than -1, which the interrupt gate glue code might return.
2474 */
2475 Log(("operation %#x is not supported\n", enmOperation));
2476 return VERR_NOT_SUPPORTED;
2477 }
2478 return rc;
2479}
2480
2481
2482/**
2483 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2484 *
2485 * @returns VBox status code.
2486 * @param pvArgs The argument package
2487 */
2488static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2489{
2490 PGVMCPU pGVCpu = (PGVMCPU)pvArgs;
2491 return vmmR0EntryExWorker(pGVCpu->vmmr0.s.pGVM,
2492 pGVCpu->vmmr0.s.idCpu,
2493 pGVCpu->vmmr0.s.enmOperation,
2494 pGVCpu->vmmr0.s.pReq,
2495 pGVCpu->vmmr0.s.u64Arg,
2496 pGVCpu->vmmr0.s.pSession);
2497}
2498
2499
2500/**
2501 * The Ring 0 entry point, called by the support library (SUP).
2502 *
2503 * @returns VBox status code.
2504 * @param pGVM The global (ring-0) VM structure.
2505 * @param pVM The cross context VM structure.
2506 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2507 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
2508 * @param enmOperation Which operation to execute.
2509 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2510 * @param u64Arg Some simple constant argument.
2511 * @param pSession The session of the caller.
2512 * @remarks Assume called with interrupts _enabled_.
2513 */
2514VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2515 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2516{
2517 /*
2518 * Requests that should only happen on the EMT thread will be
2519 * wrapped in a setjmp so we can assert without causing trouble.
2520 */
2521 if ( pVM != NULL
2522 && pGVM != NULL
2523 && pVM == pGVM /** @todo drop pVM or pGVM */
2524 && idCpu < pGVM->cCpus
2525 && pGVM->pSession == pSession
2526 && pGVM->pSelf == pVM)
2527 {
2528 switch (enmOperation)
2529 {
2530 /* These might/will be called before VMMR3Init. */
2531 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2532 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2533 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2534 case VMMR0_DO_GMM_FREE_PAGES:
2535 case VMMR0_DO_GMM_BALLOONED_PAGES:
2536 /* On the mac we might not have a valid jmp buf, so check these as well. */
2537 case VMMR0_DO_VMMR0_INIT:
2538 case VMMR0_DO_VMMR0_TERM:
2539
2540 case VMMR0_DO_PDM_DEVICE_CREATE:
2541 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2542 case VMMR0_DO_IOM_GROW_IO_PORTS:
2543 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2544 case VMMR0_DO_DBGF_BP_INIT:
2545 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2546 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2547 {
2548 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2549 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2550 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2551 && pGVCpu->hNativeThreadR0 == hNativeThread))
2552 {
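                /* No saved stack means the ring-0 jump buffer isn't set up yet, so we cannot
                   arm the longjmp protection; fall through to the plain worker call at the
                   bottom of this function instead. */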
2553 if (!pGVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2554 break;
2555
2556 pGVCpu->vmmr0.s.pGVM = pGVM;
2557 pGVCpu->vmmr0.s.idCpu = idCpu;
2558 pGVCpu->vmmr0.s.enmOperation = enmOperation;
2559 pGVCpu->vmmr0.s.pReq = pReq;
2560 pGVCpu->vmmr0.s.u64Arg = u64Arg;
2561 pGVCpu->vmmr0.s.pSession = pSession;
2562 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, pGVCpu,
2563 ((uintptr_t)u64Arg << 16) | (uintptr_t)enmOperation);
2564 }
2565 return VERR_VM_THREAD_NOT_EMT;
2566 }
2567
2568 default:
2569 case VMMR0_DO_PGM_POOL_GROW:
2570 break;
2571 }
2572 }
2573 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2574}
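/* Illustrative note (an assumption about the usual call path, not something defined in this
 * file): ring-3 reaches this entry point through the support library, e.g. something along
 * the lines of
 *
 *     rc = SUPR3CallVMMR0Ex(pVMR0, idCpu, VMMR0_DO_GVMM_SCHED_HALT, u64ExpireGipTime, NULL);
 *
 * which the support driver routes to the VMMR0EntryEx of the VMM module it has loaded.
 */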
2575
2576
2577/**
2578 * Checks whether we've armed the ring-0 long jump machinery.
2579 *
2580 * @returns @c true / @c false
2581 * @param pVCpu The cross context virtual CPU structure.
2582 * @thread EMT
2583 * @sa VMMIsLongJumpArmed
2584 */
2585VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2586{
2587#ifdef RT_ARCH_X86
2588 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2589 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2590#else
2591 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2592 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2593#endif
2594}
2595
2596
2597/**
2598 * Checks whether we've done a ring-3 long jump.
2599 *
2600 * @returns @c true / @c false
2601 * @param pVCpu The cross context virtual CPU structure.
2602 * @thread EMT
2603 */
2604VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2605{
2606 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2607}
2608
2609
2610/**
2611 * Locking helper that deals with HM context and checks if the thread can block.
2612 *
2613 * @returns VINF_SUCCESS if we can block. Returns @a rcBusy or
2614 * VERR_VMM_CANNOT_BLOCK if not able to block.
2615 * @param pVCpu The cross context virtual CPU structure of the calling
2616 * thread.
2617 * @param rcBusy What to return in case of a blocking problem. Will IPE
2618 * if VINF_SUCCESS and we cannot block.
2619 * @param pszCaller The caller (for logging problems).
2620 * @param pvLock The lock address (for logging problems).
2621 * @param pCtx Where to return context info for the resume call.
2622 * @thread EMT(pVCpu)
2623 */
2624VMMR0_INT_DECL(int) VMMR0EmtPrepareToBlock(PVMCPUCC pVCpu, int rcBusy, const char *pszCaller, void *pvLock,
2625 PVMMR0EMTBLOCKCTX pCtx)
2626{
2627 const char *pszMsg;
2628
2629 /*
2630 * Check that we are allowed to block.
2631 */
2632 if (RT_LIKELY(VMMRZCallRing3IsEnabled(pVCpu)))
2633 {
2634 /*
2635         * Are we in HM context and w/o a context hook? If so, work the context hook.
2636 */
2637 if (pVCpu->vmmr0.s.idHostCpu != NIL_RTCPUID)
2638 {
2639 Assert(pVCpu->vmmr0.s.iHostCpuSet != UINT32_MAX);
2640 Assert(pVCpu->vmmr0.s.fInHmContext);
2641
2642 if (pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK)
2643 {
2644 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_OUT, pVCpu);
2645 if (pVCpu->vmmr0.s.pPreemptState)
2646 RTThreadPreemptRestore(pVCpu->vmmr0.s.pPreemptState);
2647
2648 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2649 pCtx->fWasInHmContext = true;
2650 return VINF_SUCCESS;
2651 }
2652 }
2653
2654 if (RT_LIKELY(!pVCpu->vmmr0.s.pPreemptState))
2655 {
2656 /*
2657 * Not in HM context or we've got hooks, so just check that preemption
2658 * is enabled.
2659 */
2660 if (RT_LIKELY(RTThreadPreemptIsEnabled(NIL_RTTHREAD)))
2661 {
2662 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC;
2663 pCtx->fWasInHmContext = false;
2664 return VINF_SUCCESS;
2665 }
2666 pszMsg = "Preemption is disabled!";
2667 }
2668 else
2669 pszMsg = "Preemption state w/o HM state!";
2670 }
2671 else
2672 pszMsg = "Ring-3 calls are disabled!";
2673
2674 static uint32_t volatile s_cWarnings = 0;
2675 if (++s_cWarnings < 50)
2676 SUPR0Printf("VMMR0EmtPrepareToBlock: %s pvLock=%p pszCaller=%s rcBusy=%p\n", pszMsg, pvLock, pszCaller, rcBusy);
2677 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2678 pCtx->fWasInHmContext = false;
2679 return rcBusy != VINF_SUCCESS ? rcBusy : VERR_VMM_CANNOT_BLOCK;
2680}
2681
2682
2683/**
2684 * Counterpart to VMMR0EmtPrepareToBlock.
2685 *
2686 * @param pVCpu The cross context virtual CPU structure of the calling
2687 * thread.
2688 * @param pCtx The context structure used with VMMR0EmtPrepareToBlock.
2689 * @thread EMT(pVCpu)
2690 */
2691VMMR0_INT_DECL(void) VMMR0EmtResumeAfterBlocking(PVMCPUCC pVCpu, PVMMR0EMTBLOCKCTX pCtx)
2692{
2693 AssertReturnVoid(pCtx->uMagic == VMMR0EMTBLOCKCTX_MAGIC);
2694 if (pCtx->fWasInHmContext)
2695 {
2696 if (pVCpu->vmmr0.s.pPreemptState)
2697 RTThreadPreemptDisable(pVCpu->vmmr0.s.pPreemptState);
2698
2699 pCtx->fWasInHmContext = false;
2700 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_IN, pVCpu);
2701 }
2702 pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
2703}
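/* Illustrative usage sketch (hypothetical caller; the lock pointer, event handle and timeout
 * are placeholders for whatever the caller actually blocks on):
 *
 *     VMMR0EMTBLOCKCTX Ctx;
 *     int rc = VMMR0EmtPrepareToBlock(pVCpu, VERR_SEM_BUSY, __FUNCTION__, pCritSect, &Ctx);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         rc = RTSemEventWait(hEvent, cMsTimeout);     // the actual blocking call
 *         VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
 *     }
 *     // otherwise we may not block here; rc is rcBusy or VERR_VMM_CANNOT_BLOCK.
 */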
2704
2705
2706/**
2707 * Internal R0 logger worker: Flush logger.
2708 *
2709 * @param pLogger The logger instance to flush.
2710 * @remark This function must be exported!
2711 */
2712VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2713{
2714#ifdef LOG_ENABLED
2715 /*
2716 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2717     * (This code is a bit paranoid.)
2718 */
2719 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_UOFFSETOF(VMMR0LOGGER, Logger));
2720 if ( !VALID_PTR(pR0Logger)
2721 || !VALID_PTR(pR0Logger + 1)
2722 || pLogger->u32Magic != RTLOGGER_MAGIC)
2723 {
2724# ifdef DEBUG
2725 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2726# endif
2727 return;
2728 }
2729 if (pR0Logger->fFlushingDisabled)
2730 return; /* quietly */
2731
2732 PVMCC pVM = pR0Logger->pVM;
2733 if ( !VALID_PTR(pVM)
2734 || pVM->pSelf != pVM)
2735 {
2736# ifdef DEBUG
2737 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pSelf=%p! pLogger=%p\n", pVM, pVM->pSelf, pLogger);
2738# endif
2739 return;
2740 }
2741
2742 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2743 if (pVCpu)
2744 {
2745 /*
2746 * Check that the jump buffer is armed.
2747 */
2748# ifdef RT_ARCH_X86
2749 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2750 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2751# else
2752 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2753 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2754# endif
2755 {
2756# ifdef DEBUG
2757 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2758# endif
2759 return;
2760 }
2761 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2762 }
2763# ifdef DEBUG
2764 else
2765 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2766# endif
2767#else
2768 NOREF(pLogger);
2769#endif /* LOG_ENABLED */
2770}
2771
2772#ifdef LOG_ENABLED
2773
2774/**
2775 * Disables flushing of the ring-0 debug log.
2776 *
2777 * @param pVCpu The cross context virtual CPU structure.
2778 */
2779VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPUCC pVCpu)
2780{
2781 if (pVCpu->vmm.s.pR0LoggerR0)
2782 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2783 if (pVCpu->vmm.s.pR0RelLoggerR0)
2784 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = true;
2785}
2786
2787
2788/**
2789 * Enables flushing of the ring-0 debug log.
2790 *
2791 * @param pVCpu The cross context virtual CPU structure.
2792 */
2793VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPUCC pVCpu)
2794{
2795 if (pVCpu->vmm.s.pR0LoggerR0)
2796 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2797 if (pVCpu->vmm.s.pR0RelLoggerR0)
2798 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = false;
2799}
2800
2801
2802/**
2803 * Checks if log flushing is disabled or not.
2804 *
2805 * @param pVCpu The cross context virtual CPU structure.
2806 */
2807VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPUCC pVCpu)
2808{
2809 if (pVCpu->vmm.s.pR0LoggerR0)
2810 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2811 if (pVCpu->vmm.s.pR0RelLoggerR0)
2812 return pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled;
2813 return true;
2814}
2815
2816#endif /* LOG_ENABLED */
2817
2818/*
2819 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
2820 */
2821DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
2822{
2823 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
2824 if (pGVCpu)
2825 {
2826 PVMCPUCC pVCpu = pGVCpu;
2827 if (RT_VALID_PTR(pVCpu))
2828 {
2829 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR0;
2830 if (RT_VALID_PTR(pVmmLogger))
2831 {
2832 if ( pVmmLogger->fCreated
2833 && pVmmLogger->pVM == pGVCpu->pGVM)
2834 {
2835 if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
2836 return NULL;
2837 uint16_t const fFlags = RT_LO_U16(fFlagsAndGroup);
2838 uint16_t const iGroup = RT_HI_U16(fFlagsAndGroup);
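            /* Only hand out the logger when the requested group is enabled: the group's flag
               mask must contain every requested flag plus RTLOGGRPFLAGS_ENABLED, otherwise
               return NULL so the LogRel statement is skipped. */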
2839 if ( iGroup != UINT16_MAX
2840 && ( ( pVmmLogger->Logger.afGroups[iGroup < pVmmLogger->Logger.cGroups ? iGroup : 0]
2841 & (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED))
2842 != (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED)))
2843 return NULL;
2844 return &pVmmLogger->Logger;
2845 }
2846 }
2847 }
2848 }
2849 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
2850}
2851
2852
2853/*
2854 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2855 *
2856 * @returns true if the breakpoint should be hit, false if it should be ignored.
2857 */
2858DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2859{
2860#if 0
2861 return true;
2862#else
2863 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2864 if (pVM)
2865 {
2866 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2867
2868 if (pVCpu)
2869 {
2870# ifdef RT_ARCH_X86
2871 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2872 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2873# else
2874 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2875 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2876# endif
2877 {
2878 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2879 return RT_FAILURE_NP(rc);
2880 }
2881 }
2882 }
2883# ifdef RT_OS_LINUX
2884 return true;
2885# else
2886 return false;
2887# endif
2888#endif
2889}
2890
2891
2892/*
2893 * Override this so we can push it up to ring-3.
2894 */
2895DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2896{
2897 /*
2898 * To the log.
2899 */
2900 LogAlways(("\n!!R0-Assertion Failed!!\n"
2901 "Expression: %s\n"
2902 "Location : %s(%d) %s\n",
2903 pszExpr, pszFile, uLine, pszFunction));
2904
2905 /*
2906 * To the global VMM buffer.
2907 */
2908 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2909 if (pVM)
2910 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2911 "\n!!R0-Assertion Failed!!\n"
2912 "Expression: %.*s\n"
2913 "Location : %s(%d) %s\n",
2914 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2915 pszFile, uLine, pszFunction);
2916
2917 /*
2918 * Continue the normal way.
2919 */
2920 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2921}
2922
2923
2924/**
2925 * Callback for RTLogFormatV which writes to the ring-3 log port.
2926 * See PFNLOGOUTPUT() for details.
2927 */
2928static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2929{
2930 for (size_t i = 0; i < cbChars; i++)
2931 {
2932 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2933 }
2934
2935 NOREF(pv);
2936 return cbChars;
2937}
2938
2939
2940/*
2941 * Override this so we can push it up to ring-3.
2942 */
2943DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2944{
2945 va_list vaCopy;
2946
2947 /*
2948 * Push the message to the loggers.
2949 */
2950 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2951 if (pLog)
2952 {
2953 va_copy(vaCopy, va);
2954 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2955 va_end(vaCopy);
2956 }
2957 pLog = RTLogRelGetDefaultInstance();
2958 if (pLog)
2959 {
2960 va_copy(vaCopy, va);
2961 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2962 va_end(vaCopy);
2963 }
2964
2965 /*
2966 * Push it to the global VMM buffer.
2967 */
2968 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2969 if (pVM)
2970 {
2971 va_copy(vaCopy, va);
2972 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2973 va_end(vaCopy);
2974 }
2975
2976 /*
2977 * Continue the normal way.
2978 */
2979 RTAssertMsg2V(pszFormat, va);
2980}
2981