VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 72617

Last change on this file was r72617, checked in by vboxsync, 7 years ago

VMM: LogRel some ring-0 preemption details in ring-3.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 89.8 KB
1/* $Id: VMMR0.cpp 72617 2018-06-19 15:38:27Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#ifdef VBOX_WITH_NEM_R0
30# include <VBox/vmm/nem.h>
31#endif
32#include <VBox/vmm/stam.h>
33#include <VBox/vmm/tm.h>
34#include "VMMInternal.h"
35#include <VBox/vmm/vm.h>
36#include <VBox/vmm/gvm.h>
37#ifdef VBOX_WITH_PCI_PASSTHROUGH
38# include <VBox/vmm/pdmpci.h>
39#endif
40#include <VBox/vmm/apic.h>
41
42#include <VBox/vmm/gvmm.h>
43#include <VBox/vmm/gmm.h>
44#include <VBox/vmm/gim.h>
45#include <VBox/intnet.h>
46#include <VBox/vmm/hm.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49#include <VBox/version.h>
50#include <VBox/log.h>
51
52#include <iprt/asm-amd64-x86.h>
53#include <iprt/assert.h>
54#include <iprt/crc.h>
55#include <iprt/mp.h>
56#include <iprt/once.h>
57#include <iprt/stdarg.h>
58#include <iprt/string.h>
59#include <iprt/thread.h>
60#include <iprt/timer.h>
61
62#include "dtrace/VBoxVMM.h"
63
64
65#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
66# pragma intrinsic(_AddressOfReturnAddress)
67#endif
68
69#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
70# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
71#endif
72
73
74
75/*********************************************************************************************************************************
76* Defined Constants And Macros *
77*********************************************************************************************************************************/
78/** @def VMM_CHECK_SMAP_SETUP
79 * SMAP check setup. */
80/** @def VMM_CHECK_SMAP_CHECK
81 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
82 * it will be logged and @a a_BadExpr is executed. */
83/** @def VMM_CHECK_SMAP_CHECK2
84 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
 85 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
86 * executed. */
87#if defined(VBOX_STRICT) || 1
88# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
89# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
90 do { \
91 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
92 { \
93 RTCCUINTREG fEflCheck = ASMGetFlags(); \
94 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
95 { /* likely */ } \
96 else \
97 { \
98 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
99 a_BadExpr; \
100 } \
101 } \
102 } while (0)
103# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
104 do { \
105 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
106 { \
107 RTCCUINTREG fEflCheck = ASMGetFlags(); \
108 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
109 { /* likely */ } \
110 else \
111 { \
112 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
 113 RTStrPrintf((a_pVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pVM)->vmm.s.szRing0AssertMsg1), \
114 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
115 a_BadExpr; \
116 } \
117 } \
118 } while (0)
119#else
120# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
121# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
122# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
123#endif
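
#if 0 /* Illustrative sketch only -- not part of the original file. */
/*
 * Minimal usage pattern for the SMAP checks above, mirroring how ModuleInit
 * and vmmR0InitVM further down use them: snapshot the kernel features once
 * per function with VMM_CHECK_SMAP_SETUP(), then sprinkle checks between
 * calls.  The a_BadExpr argument is whatever should happen when EFLAGS.AC
 * is found clear (RT_NOTHING, a return statement, or an rc assignment).
 */
static int vmmR0SmapCheckUsageSketch(PVM pVM)
{
    VMM_CHECK_SMAP_SETUP();
    VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);   /* log and bail out */

    int rc = VINF_SUCCESS;
    /* ... do some ring-0 work ... */

    VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* log and record in the VM's assertion buffer */
    return rc;
}
#endif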
124
125
126/*********************************************************************************************************************************
127* Internal Functions *
128*********************************************************************************************************************************/
129RT_C_DECLS_BEGIN
130#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
131extern uint64_t __udivdi3(uint64_t, uint64_t);
132extern uint64_t __umoddi3(uint64_t, uint64_t);
133#endif
134RT_C_DECLS_END
135
136
137/*********************************************************************************************************************************
138* Global Variables *
139*********************************************************************************************************************************/
140/** Drag in necessary library bits.
141 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
142PFNRT g_VMMR0Deps[] =
143{
144 (PFNRT)RTCrc32,
145 (PFNRT)RTOnce,
146#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
147 (PFNRT)__udivdi3,
148 (PFNRT)__umoddi3,
149#endif
150 NULL
151};
152
153#ifdef RT_OS_SOLARIS
154/* Dependency information for the native solaris loader. */
155extern "C" { char _depends_on[] = "vboxdrv"; }
156#endif
157
158/** The result of SUPR0GetRawModeUsability(), set by ModuleInit(). */
159int g_rcRawModeUsability = VINF_SUCCESS;
160
161
162/**
163 * Initialize the module.
164 * This is called when we're first loaded.
165 *
166 * @returns 0 on success.
167 * @returns VBox status on failure.
168 * @param hMod Image handle for use in APIs.
169 */
170DECLEXPORT(int) ModuleInit(void *hMod)
171{
172 VMM_CHECK_SMAP_SETUP();
173 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
174
175#ifdef VBOX_WITH_DTRACE_R0
176 /*
177 * The first thing to do is register the static tracepoints.
178 * (Deregistration is automatic.)
179 */
180 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
181 if (RT_FAILURE(rc2))
182 return rc2;
183#endif
184 LogFlow(("ModuleInit:\n"));
185
186#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
187 /*
188 * Display the CMOS debug code.
189 */
190 ASMOutU8(0x72, 0x03);
191 uint8_t bDebugCode = ASMInU8(0x73);
192 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
193 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
194#endif
195
196 /*
197 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
198 */
199 int rc = vmmInitFormatTypes();
200 if (RT_SUCCESS(rc))
201 {
202 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
203 rc = GVMMR0Init();
204 if (RT_SUCCESS(rc))
205 {
206 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
207 rc = GMMR0Init();
208 if (RT_SUCCESS(rc))
209 {
210 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
211 rc = HMR0Init();
212 if (RT_SUCCESS(rc))
213 {
214 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
215 rc = PGMRegisterStringFormatTypes();
216 if (RT_SUCCESS(rc))
217 {
218 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
219#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
220 rc = PGMR0DynMapInit();
221#endif
222 if (RT_SUCCESS(rc))
223 {
224 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
225 rc = IntNetR0Init();
226 if (RT_SUCCESS(rc))
227 {
228#ifdef VBOX_WITH_PCI_PASSTHROUGH
229 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
230 rc = PciRawR0Init();
231#endif
232 if (RT_SUCCESS(rc))
233 {
234 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
235 rc = CPUMR0ModuleInit();
236 if (RT_SUCCESS(rc))
237 {
238#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
239 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
240 rc = vmmR0TripleFaultHackInit();
241 if (RT_SUCCESS(rc))
242#endif
243 {
244 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
245 if (RT_SUCCESS(rc))
246 {
247 g_rcRawModeUsability = SUPR0GetRawModeUsability();
248 if (g_rcRawModeUsability != VINF_SUCCESS)
249 SUPR0Printf("VMMR0!ModuleInit: SUPR0GetRawModeUsability -> %Rrc\n",
250 g_rcRawModeUsability);
251 LogFlow(("ModuleInit: returns success\n"));
252 return VINF_SUCCESS;
253 }
254 }
255
256 /*
257 * Bail out.
258 */
259#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
260 vmmR0TripleFaultHackTerm();
261#endif
262 }
263 else
264 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
265#ifdef VBOX_WITH_PCI_PASSTHROUGH
266 PciRawR0Term();
267#endif
268 }
269 else
270 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
271 IntNetR0Term();
272 }
273 else
274 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
275#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
276 PGMR0DynMapTerm();
277#endif
278 }
279 else
280 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
281 PGMDeregisterStringFormatTypes();
282 }
283 else
284 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
285 HMR0Term();
286 }
287 else
288 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
289 GMMR0Term();
290 }
291 else
292 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
293 GVMMR0Term();
294 }
295 else
296 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
297 vmmTermFormatTypes();
298 }
299 else
300 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
301
302 LogFlow(("ModuleInit: failed %Rrc\n", rc));
303 return rc;
304}
305
306
307/**
308 * Terminate the module.
309 * This is called when we're finally unloaded.
310 *
311 * @param hMod Image handle for use in APIs.
312 */
313DECLEXPORT(void) ModuleTerm(void *hMod)
314{
315 NOREF(hMod);
316 LogFlow(("ModuleTerm:\n"));
317
318 /*
319 * Terminate the CPUM module (Local APIC cleanup).
320 */
321 CPUMR0ModuleTerm();
322
323 /*
324 * Terminate the internal network service.
325 */
326 IntNetR0Term();
327
328 /*
329 * PGM (Darwin), HM and PciRaw global cleanup.
330 */
331#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
332 PGMR0DynMapTerm();
333#endif
334#ifdef VBOX_WITH_PCI_PASSTHROUGH
335 PciRawR0Term();
336#endif
337 PGMDeregisterStringFormatTypes();
338 HMR0Term();
339#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
340 vmmR0TripleFaultHackTerm();
341#endif
342
343 /*
344 * Destroy the GMM and GVMM instances.
345 */
346 GMMR0Term();
347 GVMMR0Term();
348
349 vmmTermFormatTypes();
350
351 LogFlow(("ModuleTerm: returns\n"));
352}
353
354
355/**
356 * Initiates the R0 driver for a particular VM instance.
357 *
358 * @returns VBox status code.
359 *
360 * @param pGVM The global (ring-0) VM structure.
361 * @param pVM The cross context VM structure.
362 * @param uSvnRev The SVN revision of the ring-3 part.
363 * @param uBuildType Build type indicator.
364 * @thread EMT(0)
365 */
366static int vmmR0InitVM(PGVM pGVM, PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
367{
368 VMM_CHECK_SMAP_SETUP();
369 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
370
371 /*
372 * Match the SVN revisions and build type.
373 */
374 if (uSvnRev != VMMGetSvnRev())
375 {
376 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
377 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
378 return VERR_VMM_R0_VERSION_MISMATCH;
379 }
380 if (uBuildType != vmmGetBuildType())
381 {
382 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
383 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
384 return VERR_VMM_R0_VERSION_MISMATCH;
385 }
386
387 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0 /*idCpu*/);
388 if (RT_FAILURE(rc))
389 return rc;
390
391
392#ifdef LOG_ENABLED
393 /*
394 * Register the EMT R0 logger instance for VCPU 0.
395 */
396 PVMCPU pVCpu = &pVM->aCpus[0];
397
398 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
399 if (pR0Logger)
400 {
401# if 0 /* testing of the logger. */
402 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
403 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
404 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
405 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
406
407 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
408 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
409 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
410 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
411
412 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
413 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
414 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
415 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
416
417 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
418 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
419 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
420 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
421 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
422 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
423
424 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
425 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
426
427 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
428 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
429 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
430# endif
431 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
432 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
433 pR0Logger->fRegistered = true;
434 }
435#endif /* LOG_ENABLED */
436
437 /*
438 * Check if the host supports high resolution timers or not.
439 */
440 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
441 && !RTTimerCanDoHighResolution())
442 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
443
444 /*
445 * Initialize the per VM data for GVMM and GMM.
446 */
447 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
448 rc = GVMMR0InitVM(pGVM);
449// if (RT_SUCCESS(rc))
450// rc = GMMR0InitPerVMData(pVM);
451 if (RT_SUCCESS(rc))
452 {
453 /*
454 * Init HM, CPUM and PGM (Darwin only).
455 */
456 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
457 rc = HMR0InitVM(pVM);
458 if (RT_SUCCESS(rc))
 459 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
460 if (RT_SUCCESS(rc))
461 {
462 rc = CPUMR0InitVM(pVM);
463 if (RT_SUCCESS(rc))
464 {
465 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
466#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
467 rc = PGMR0DynMapInitVM(pVM);
468#endif
469 if (RT_SUCCESS(rc))
470 {
471 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
472#ifdef VBOX_WITH_PCI_PASSTHROUGH
473 rc = PciRawR0InitVM(pGVM, pVM);
474#endif
475 if (RT_SUCCESS(rc))
476 {
477 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
478 rc = GIMR0InitVM(pVM);
479 if (RT_SUCCESS(rc))
480 {
481 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
482 if (RT_SUCCESS(rc))
483 {
484 GVMMR0DoneInitVM(pGVM);
485
486 /*
487 * Collect a bit of info for the VM release log.
488 */
489 pVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
 490 pVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
491
492 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
493 return rc;
494 }
495
 496 /* bail out */
497 GIMR0TermVM(pVM);
498 }
499#ifdef VBOX_WITH_PCI_PASSTHROUGH
500 PciRawR0TermVM(pGVM, pVM);
501#endif
502 }
503 }
504 }
505 HMR0TermVM(pVM);
506 }
507 }
508
509 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
510 return rc;
511}
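
#if 0 /* Illustrative sketch only -- not part of the original file. */
/*
 * How the uSvnRev/uBuildType pair reaches this function: the
 * VMMR0_DO_VMMR0_INIT case in vmmR0EntryExWorker (further down) unpacks
 * them from u64Arg with RT_LODWORD/RT_HIDWORD, so the ring-3 caller is
 * assumed to pack them the opposite way, e.g.:
 */
uint64_t const u64InitArg = RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()); /* low dword = SVN rev, high dword = build type */
#endif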
512
513
514/**
515 * Does EMT specific VM initialization.
516 *
517 * @returns VBox status code.
518 * @param pGVM The ring-0 VM structure.
519 * @param pVM The cross context VM structure.
520 * @param idCpu The EMT that's calling.
521 */
522static int vmmR0InitVMEmt(PGVM pGVM, PVM pVM, VMCPUID idCpu)
523{
524 /* Paranoia (caller checked these already). */
525 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
526 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
527
528#ifdef LOG_ENABLED
529 /*
530 * Registration of ring 0 loggers.
531 */
532 PVMCPU pVCpu = &pVM->aCpus[idCpu];
533 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
534 if ( pR0Logger
535 && !pR0Logger->fRegistered)
536 {
537 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
538 pR0Logger->fRegistered = true;
539 }
540#endif
541 RT_NOREF(pVM);
542
543 return VINF_SUCCESS;
544}
545
546
547
548/**
549 * Terminates the R0 bits for a particular VM instance.
550 *
551 * This is normally called by ring-3 as part of the VM termination process, but
552 * may alternatively be called during the support driver session cleanup when
553 * the VM object is destroyed (see GVMM).
554 *
555 * @returns VBox status code.
556 *
557 * @param pGVM The global (ring-0) VM structure.
558 * @param pVM The cross context VM structure.
559 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
560 * thread.
561 * @thread EMT(0) or session clean up thread.
562 */
563VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, PVM pVM, VMCPUID idCpu)
564{
565 /*
566 * Check EMT(0) claim if we're called from userland.
567 */
568 if (idCpu != NIL_VMCPUID)
569 {
570 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
571 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
572 if (RT_FAILURE(rc))
573 return rc;
574 }
575
576#ifdef VBOX_WITH_PCI_PASSTHROUGH
577 PciRawR0TermVM(pGVM, pVM);
578#endif
579
580 /*
581 * Tell GVMM what we're up to and check that we only do this once.
582 */
583 if (GVMMR0DoingTermVM(pGVM))
584 {
585 GIMR0TermVM(pVM);
586
587 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
588 * here to make sure we don't leak any shared pages if we crash... */
589#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
590 PGMR0DynMapTermVM(pVM);
591#endif
592 HMR0TermVM(pVM);
593 }
594
595 /*
596 * Deregister the logger.
597 */
598 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
599 return VINF_SUCCESS;
600}
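
#if 0 /* Illustrative sketch only -- not part of the original file. */
/* The two ways this function is reached, per the doc comment above. */
static void vmmR0TermVMCallFormsSketch(PGVM pGVM, PVM pVM)
{
    VMMR0TermVM(pGVM, pVM, 0 /*idCpu*/);     /* ring-3 initiated termination, called on EMT(0) (VMMR0_DO_VMMR0_TERM) */
    VMMR0TermVM(pGVM, pVM, NIL_VMCPUID);     /* support driver session cleanup when the VM object is destroyed */
}
#endif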
601
602
603/**
604 * VMM ring-0 thread-context callback.
605 *
606 * This does common HM state updating and calls the HM-specific thread-context
607 * callback.
608 *
609 * @param enmEvent The thread-context event.
610 * @param pvUser Opaque pointer to the VMCPU.
611 *
612 * @thread EMT(pvUser)
613 */
614static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
615{
616 PVMCPU pVCpu = (PVMCPU)pvUser;
617
618 switch (enmEvent)
619 {
620 case RTTHREADCTXEVENT_IN:
621 {
622 /*
623 * Linux may call us with preemption enabled (really!) but technically we
624 * cannot get preempted here, otherwise we end up in an infinite recursion
625 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
626 * ad infinitum). Let's just disable preemption for now...
627 */
628 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
629 * preemption after doing the callout (one or two functions up the
630 * call chain). */
631 /** @todo r=ramshankar: See @bugref{5313#c30}. */
632 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
633 RTThreadPreemptDisable(&ParanoidPreemptState);
634
635 /* We need to update the VCPU <-> host CPU mapping. */
636 RTCPUID idHostCpu;
637 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
638 pVCpu->iHostCpuSet = iHostCpuSet;
639 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
640
641 /* In the very unlikely event that the GIP delta for the CPU we're
 642 rescheduled on needs calculating, try to force a return to ring-3.
643 We unfortunately cannot do the measurements right here. */
644 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
645 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
646
647 /* Invoke the HM-specific thread-context callback. */
648 HMR0ThreadCtxCallback(enmEvent, pvUser);
649
650 /* Restore preemption. */
651 RTThreadPreemptRestore(&ParanoidPreemptState);
652 break;
653 }
654
655 case RTTHREADCTXEVENT_OUT:
656 {
657 /* Invoke the HM-specific thread-context callback. */
658 HMR0ThreadCtxCallback(enmEvent, pvUser);
659
660 /*
661 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
662 * have the same host CPU associated with it.
663 */
664 pVCpu->iHostCpuSet = UINT32_MAX;
665 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
666 break;
667 }
668
669 default:
670 /* Invoke the HM-specific thread-context callback. */
671 HMR0ThreadCtxCallback(enmEvent, pvUser);
672 break;
673 }
674}
675
676
677/**
678 * Creates thread switching hook for the current EMT thread.
679 *
680 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
 681 * platform does not implement switcher hooks, no hook will be created and the
 682 * member is set to NIL_RTTHREADCTXHOOK.
683 *
684 * @returns VBox status code.
685 * @param pVCpu The cross context virtual CPU structure.
686 * @thread EMT(pVCpu)
687 */
688VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
689{
690 VMCPU_ASSERT_EMT(pVCpu);
691 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
692
693#if 1 /* To disable this stuff change to zero. */
694 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
695 if (RT_SUCCESS(rc))
696 return rc;
697#else
698 RT_NOREF(vmmR0ThreadCtxCallback);
699 int rc = VERR_NOT_SUPPORTED;
700#endif
701
702 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
703 if (rc == VERR_NOT_SUPPORTED)
704 return VINF_SUCCESS;
705
706 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
707 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
708}
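
#if 0 /* Illustrative sketch only -- not part of the original file. */
/*
 * Lifecycle of the thread-context hook as used elsewhere in this file:
 * created once per EMT (above), enabled just before entering HM context
 * (see the VMMR0_DO_HM_RUN case in VMMR0EntryFast), disabled again before
 * returning to ring-3, and destroyed when the EMT is torn down.
 */
static void vmmR0CtxHookLifecycleSketch(PVMCPU pVCpu)
{
    int rc = VMMR0ThreadCtxHookCreateForEmt(pVCpu);        /* GVMMR0CreateVM / GVMMR0RegisterVCpu time */
    if (RT_SUCCESS(rc) && pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
    {
        RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook);      /* right before HMR0Enter (VMMR0_DO_HM_RUN below) */
        /* ... vmmR0ThreadCtxCallback fires on every preemption/resumption while enabled ... */
        VMMR0ThreadCtxHookDisable(pVCpu);                  /* before going back to ring-3 */
    }
    VMMR0ThreadCtxHookDestroyForEmt(pVCpu);                /* EMT / VM cleanup */
}
#endif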
709
710
711/**
712 * Destroys the thread switching hook for the specified VCPU.
713 *
714 * @param pVCpu The cross context virtual CPU structure.
715 * @remarks Can be called from any thread.
716 */
717VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
718{
719 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
720 AssertRC(rc);
721 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
722}
723
724
725/**
726 * Disables the thread switching hook for this VCPU (if we got one).
727 *
728 * @param pVCpu The cross context virtual CPU structure.
729 * @thread EMT(pVCpu)
730 *
731 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
732 * this call. This means you have to be careful with what you do!
733 */
734VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
735{
736 /*
737 * Clear the VCPU <-> host CPU mapping as we've left HM context.
738 * @bugref{7726#c19} explains the need for this trick:
739 *
740 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
 741 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disable context hooks during
 742 * longjmp & normal return to ring-3, which opens a window where we may be
 743 * rescheduled without changing VMCPU::idHostCpu, causing confusion if
 744 * the CPU starts executing a different EMT. Both functions first disable
 745 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
746 * an opening for getting preempted.
747 */
748 /** @todo Make HM not need this API! Then we could leave the hooks enabled
749 * all the time. */
750 /** @todo move this into the context hook disabling if(). */
751 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
752
753 /*
754 * Disable the context hook, if we got one.
755 */
756 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
757 {
758 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
759 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
760 AssertRC(rc);
761 }
762}
763
764
765/**
 766 * Internal version of VMMR0ThreadCtxHookIsEnabled.
767 *
768 * @returns true if registered, false otherwise.
769 * @param pVCpu The cross context virtual CPU structure.
770 */
771DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
772{
773 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
774}
775
776
777/**
778 * Whether thread-context hooks are registered for this VCPU.
779 *
780 * @returns true if registered, false otherwise.
781 * @param pVCpu The cross context virtual CPU structure.
782 */
783VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
784{
785 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
786}
787
788
789#ifdef VBOX_WITH_STATISTICS
790/**
 791 * Record return code statistics.
792 * @param pVM The cross context VM structure.
793 * @param pVCpu The cross context virtual CPU structure.
794 * @param rc The status code.
795 */
796static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
797{
798 /*
799 * Collect statistics.
800 */
801 switch (rc)
802 {
803 case VINF_SUCCESS:
804 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
805 break;
806 case VINF_EM_RAW_INTERRUPT:
807 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
808 break;
809 case VINF_EM_RAW_INTERRUPT_HYPER:
810 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
811 break;
812 case VINF_EM_RAW_GUEST_TRAP:
813 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
814 break;
815 case VINF_EM_RAW_RING_SWITCH:
816 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
817 break;
818 case VINF_EM_RAW_RING_SWITCH_INT:
819 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
820 break;
821 case VINF_EM_RAW_STALE_SELECTOR:
822 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
823 break;
824 case VINF_EM_RAW_IRET_TRAP:
825 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
826 break;
827 case VINF_IOM_R3_IOPORT_READ:
828 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
829 break;
830 case VINF_IOM_R3_IOPORT_WRITE:
831 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
832 break;
833 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
834 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
835 break;
836 case VINF_IOM_R3_MMIO_READ:
837 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
838 break;
839 case VINF_IOM_R3_MMIO_WRITE:
840 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
841 break;
842 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
843 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
844 break;
845 case VINF_IOM_R3_MMIO_READ_WRITE:
846 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
847 break;
848 case VINF_PATM_HC_MMIO_PATCH_READ:
849 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
850 break;
851 case VINF_PATM_HC_MMIO_PATCH_WRITE:
852 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
853 break;
854 case VINF_CPUM_R3_MSR_READ:
855 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
856 break;
857 case VINF_CPUM_R3_MSR_WRITE:
858 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
859 break;
860 case VINF_EM_RAW_EMULATE_INSTR:
861 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
862 break;
863 case VINF_EM_RAW_EMULATE_IO_BLOCK:
864 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
865 break;
866 case VINF_PATCH_EMULATE_INSTR:
867 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
868 break;
869 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
870 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
871 break;
872 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
873 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
874 break;
875 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
876 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
877 break;
878 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
879 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
880 break;
881 case VINF_CSAM_PENDING_ACTION:
882 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
883 break;
884 case VINF_PGM_SYNC_CR3:
885 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
886 break;
887 case VINF_PATM_PATCH_INT3:
888 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
889 break;
890 case VINF_PATM_PATCH_TRAP_PF:
891 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
892 break;
893 case VINF_PATM_PATCH_TRAP_GP:
894 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
895 break;
896 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
897 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
898 break;
899 case VINF_EM_RESCHEDULE_REM:
900 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
901 break;
902 case VINF_EM_RAW_TO_R3:
903 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
904 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
905 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
906 else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
907 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
908 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
909 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
910 else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
911 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
912 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
913 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
914 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
915 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
916 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
917 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
918 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
919 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
920 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
921 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
922 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
923 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
924 else
925 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
926 break;
927
928 case VINF_EM_RAW_TIMER_PENDING:
929 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
930 break;
931 case VINF_EM_RAW_INTERRUPT_PENDING:
932 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
933 break;
934 case VINF_VMM_CALL_HOST:
935 switch (pVCpu->vmm.s.enmCallRing3Operation)
936 {
937 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
938 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
939 break;
940 case VMMCALLRING3_PDM_LOCK:
941 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
942 break;
943 case VMMCALLRING3_PGM_POOL_GROW:
944 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
945 break;
946 case VMMCALLRING3_PGM_LOCK:
947 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
948 break;
949 case VMMCALLRING3_PGM_MAP_CHUNK:
950 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
951 break;
952 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
953 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
954 break;
955 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
956 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
957 break;
958 case VMMCALLRING3_VMM_LOGGER_FLUSH:
959 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
960 break;
961 case VMMCALLRING3_VM_SET_ERROR:
962 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
963 break;
964 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
965 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
966 break;
967 case VMMCALLRING3_VM_R0_ASSERTION:
968 default:
969 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
970 break;
971 }
972 break;
973 case VINF_PATM_DUPLICATE_FUNCTION:
974 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
975 break;
976 case VINF_PGM_CHANGE_MODE:
977 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
978 break;
979 case VINF_PGM_POOL_FLUSH_PENDING:
980 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
981 break;
982 case VINF_EM_PENDING_REQUEST:
983 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
984 break;
985 case VINF_EM_HM_PATCH_TPR_INSTR:
986 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
987 break;
988 default:
989 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
990 break;
991 }
992}
993#endif /* VBOX_WITH_STATISTICS */
994
995
996/**
997 * The Ring 0 entry point, called by the fast-ioctl path.
998 *
999 * @param pGVM The global (ring-0) VM structure.
1000 * @param pVM The cross context VM structure.
1001 * The return code is stored in pVM->vmm.s.iLastGZRc.
1002 * @param idCpu The Virtual CPU ID of the calling EMT.
1003 * @param enmOperation Which operation to execute.
1004 * @remarks Assume called with interrupts _enabled_.
1005 */
1006VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1007{
1008 /*
1009 * Validation.
1010 */
1011 if ( idCpu < pGVM->cCpus
1012 && pGVM->cCpus == pVM->cCpus)
1013 { /*likely*/ }
1014 else
1015 {
1016 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x/%#x\n", idCpu, pGVM->cCpus, pVM->cCpus);
1017 return;
1018 }
1019
1020 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1021 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1022 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1023 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1024 && pVCpu->hNativeThreadR0 == hNativeThread))
1025 { /* likely */ }
1026 else
1027 {
1028 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pVCpu->hNativeThreadR0=%p\n",
1029 idCpu, hNativeThread, pGVCpu->hEMT, pVCpu->hNativeThreadR0);
1030 return;
1031 }
1032
1033 /*
1034 * SMAP fun.
1035 */
1036 VMM_CHECK_SMAP_SETUP();
1037 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1038
1039 /*
1040 * Perform requested operation.
1041 */
1042 switch (enmOperation)
1043 {
1044 /*
1045 * Switch to GC and run guest raw mode code.
1046 * Disable interrupts before doing the world switch.
1047 */
1048 case VMMR0_DO_RAW_RUN:
1049 {
1050#ifdef VBOX_WITH_RAW_MODE
1051# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1052 /* Some safety precautions first. */
1053 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1054 {
1055 pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
1056 break;
1057 }
1058# endif
1059 if (RT_SUCCESS(g_rcRawModeUsability))
1060 { /* likely */ }
1061 else
1062 {
1063 pVCpu->vmm.s.iLastGZRc = g_rcRawModeUsability;
1064 break;
1065 }
1066
1067 /*
1068 * Disable preemption.
1069 */
1070 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1071 RTThreadPreemptDisable(&PreemptState);
1072
1073 /*
1074 * Get the host CPU identifiers, make sure they are valid and that
1075 * we've got a TSC delta for the CPU.
1076 */
1077 RTCPUID idHostCpu;
1078 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1079 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1080 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1081 {
1082 /*
 1083 * Commit the CPU identifiers and update the periodic preemption timer if it's active.
1084 */
1085# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1086 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1087# endif
1088 pVCpu->iHostCpuSet = iHostCpuSet;
1089 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1090
1091 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1092 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1093
1094 /*
1095 * We might need to disable VT-x if the active switcher turns off paging.
1096 */
1097 bool fVTxDisabled;
1098 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1099 if (RT_SUCCESS(rc))
1100 {
1101 /*
1102 * Disable interrupts and run raw-mode code. The loop is for efficiently
1103 * dispatching tracepoints that fired in raw-mode context.
1104 */
1105 RTCCUINTREG uFlags = ASMIntDisableFlags();
1106
1107 for (;;)
1108 {
1109 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
1110 TMNotifyStartOfExecution(pVCpu);
1111
1112 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1113 pVCpu->vmm.s.iLastGZRc = rc;
1114
1115 TMNotifyEndOfExecution(pVCpu);
1116 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1117
1118 if (rc != VINF_VMM_CALL_TRACER)
1119 break;
1120 SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
1121 }
1122
1123 /*
1124 * Re-enable VT-x before we dispatch any pending host interrupts and
 1125 * re-enable interrupts.
1126 */
1127 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1128
1129 if ( rc == VINF_EM_RAW_INTERRUPT
1130 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1131 TRPMR0DispatchHostInterrupt(pVM);
1132
1133 ASMSetFlags(uFlags);
1134
1135 /* Fire dtrace probe and collect statistics. */
1136 VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1137# ifdef VBOX_WITH_STATISTICS
1138 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1139 vmmR0RecordRC(pVM, pVCpu, rc);
1140# endif
1141 }
1142 else
1143 pVCpu->vmm.s.iLastGZRc = rc;
1144
1145 /*
1146 * Invalidate the host CPU identifiers as we restore preemption.
1147 */
1148 pVCpu->iHostCpuSet = UINT32_MAX;
1149 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1150
1151 RTThreadPreemptRestore(&PreemptState);
1152 }
1153 /*
1154 * Invalid CPU set index or TSC delta in need of measuring.
1155 */
1156 else
1157 {
1158 RTThreadPreemptRestore(&PreemptState);
1159 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1160 {
1161 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1162 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1163 0 /*default cTries*/);
1164 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1165 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1166 else
1167 pVCpu->vmm.s.iLastGZRc = rc;
1168 }
1169 else
1170 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1171 }
1172
1173#else /* !VBOX_WITH_RAW_MODE */
1174 pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_NOT_SUPPORTED;
1175#endif
1176 break;
1177 }
1178
1179 /*
1180 * Run guest code using the available hardware acceleration technology.
1181 */
1182 case VMMR0_DO_HM_RUN:
1183 {
1184 /*
1185 * Disable preemption.
1186 */
1187 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1188 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1189 RTThreadPreemptDisable(&PreemptState);
1190
1191 /*
1192 * Get the host CPU identifiers, make sure they are valid and that
1193 * we've got a TSC delta for the CPU.
1194 */
1195 RTCPUID idHostCpu;
1196 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1197 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1198 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1199 {
1200 pVCpu->iHostCpuSet = iHostCpuSet;
1201 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1202
1203 /*
1204 * Update the periodic preemption timer if it's active.
1205 */
1206 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1207 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1208 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1209
1210#ifdef VMM_R0_TOUCH_FPU
1211 /*
 1212 * Make sure we've got the FPU state loaded so we don't need to clear
1213 * CR0.TS and get out of sync with the host kernel when loading the guest
1214 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1215 */
1216 CPUMR0TouchHostFpu();
1217#endif
1218 int rc;
1219 bool fPreemptRestored = false;
1220 if (!HMR0SuspendPending())
1221 {
1222 /*
1223 * Enable the context switching hook.
1224 */
1225 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1226 {
1227 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1228 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1229 }
1230
1231 /*
1232 * Enter HM context.
1233 */
1234 rc = HMR0Enter(pVM, pVCpu);
1235 if (RT_SUCCESS(rc))
1236 {
1237 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1238
1239 /*
1240 * When preemption hooks are in place, enable preemption now that
1241 * we're in HM context.
1242 */
1243 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1244 {
1245 fPreemptRestored = true;
1246 RTThreadPreemptRestore(&PreemptState);
1247 }
1248
1249 /*
1250 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1251 */
1252 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1253 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1254 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1255
1256 /*
 1257 * Assert sanity on the way out. Using manual assertion code here as normal
1258 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1259 */
1260 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1261 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1262 {
1263 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1264 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1265 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1266 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1267 }
1268 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1269 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1270 {
1271 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1272 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1273 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1274 rc = VERR_INVALID_STATE;
1275 }
1276
1277 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1278 }
1279 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1280
1281 /*
1282 * Invalidate the host CPU identifiers before we disable the context
1283 * hook / restore preemption.
1284 */
1285 pVCpu->iHostCpuSet = UINT32_MAX;
1286 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1287
1288 /*
1289 * Disable context hooks. Due to unresolved cleanup issues, we
1290 * cannot leave the hooks enabled when we return to ring-3.
1291 *
1292 * Note! At the moment HM may also have disabled the hook
1293 * when we get here, but the IPRT API handles that.
1294 */
1295 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1296 {
1297 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1298 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1299 }
1300 }
1301 /*
1302 * The system is about to go into suspend mode; go back to ring 3.
1303 */
1304 else
1305 {
1306 rc = VINF_EM_RAW_INTERRUPT;
1307 pVCpu->iHostCpuSet = UINT32_MAX;
1308 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1309 }
1310
1311 /** @todo When HM stops messing with the context hook state, we'll disable
1312 * preemption again before the RTThreadCtxHookDisable call. */
1313 if (!fPreemptRestored)
1314 RTThreadPreemptRestore(&PreemptState);
1315
1316 pVCpu->vmm.s.iLastGZRc = rc;
1317
1318 /* Fire dtrace probe and collect statistics. */
1319 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1320#ifdef VBOX_WITH_STATISTICS
1321 vmmR0RecordRC(pVM, pVCpu, rc);
1322#endif
1323 }
1324 /*
1325 * Invalid CPU set index or TSC delta in need of measuring.
1326 */
1327 else
1328 {
1329 pVCpu->iHostCpuSet = UINT32_MAX;
1330 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1331 RTThreadPreemptRestore(&PreemptState);
1332 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1333 {
1334 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1335 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1336 0 /*default cTries*/);
1337 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1338 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1339 else
1340 pVCpu->vmm.s.iLastGZRc = rc;
1341 }
1342 else
1343 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1344 }
1345 break;
1346 }
1347
1348#ifdef VBOX_WITH_NEM_R0
1349# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1350 case VMMR0_DO_NEM_RUN:
1351 {
1352 /*
1353 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1354 */
1355 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1356 int rc = vmmR0CallRing3SetJmp2(&pVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1357 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1358 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1359
1360 pVCpu->vmm.s.iLastGZRc = rc;
1361
1362 /*
1363 * Fire dtrace probe and collect statistics.
1364 */
1365 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1366# ifdef VBOX_WITH_STATISTICS
1367 vmmR0RecordRC(pVM, pVCpu, rc);
1368# endif
1369 break;
1370 }
1371# endif
1372#endif
1373
1374
1375 /*
1376 * For profiling.
1377 */
1378 case VMMR0_DO_NOP:
1379 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1380 break;
1381
1382 /*
1383 * Shouldn't happen.
1384 */
1385 default:
1386 AssertMsgFailed(("%#x\n", enmOperation));
1387 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1388 break;
1389 }
1390 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1391}
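
#if 0 /* Illustrative ring-3 sketch only -- not part of this ring-0 module. */
/*
 * How ring-3 is assumed to reach VMMR0EntryFast: the fast ioctl carries no
 * status code back, so the EMT reads the result from iLastGZRc afterwards.
 * The SUPR3CallVMMR0Fast name/signature is taken to be the SUPLib interface
 * from VBox/sup.h; this is a hedged sketch and the exact call may differ.
 */
static int vmmR3FastCallSketch(PVM pVM, PVMCPU pVCpu)
{
    int rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HM_RUN, pVCpu->idCpu);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        rc = pVCpu->vmm.s.iLastGZRc;    /* the status VMMR0EntryFast stored above */
    return rc;
}
#endif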
1392
1393
1394/**
1395 * Validates a session or VM session argument.
1396 *
1397 * @returns true / false accordingly.
1398 * @param pVM The cross context VM structure.
1399 * @param pClaimedSession The session claim to validate.
1400 * @param pSession The session argument.
1401 */
1402DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1403{
1404 /* This must be set! */
1405 if (!pSession)
1406 return false;
1407
1408 /* Only one out of the two. */
1409 if (pVM && pClaimedSession)
1410 return false;
1411 if (pVM)
1412 pClaimedSession = pVM->pSession;
1413 return pClaimedSession == pSession;
1414}
1415
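
#if 0 /* Illustrative ring-3 sketch only -- not part of this ring-0 module. */
/*
 * How a request-style operation is assumed to reach the worker below, using
 * the GCFGM mock-up handled near the end of the switch as an example.  The
 * SUPR3CallVMMR0Ex and SUPVMMR0REQHDR_MAGIC names are assumptions based on
 * VBox/sup.h; the support driver validates the request header before the
 * worker ever sees it.  Note that the GCFGM cases require pGVM/pVM to be
 * NULL, hence no VM handle is passed.
 */
static int vmmR3GcfgmQueryValueSketch(PSUPDRVSESSION pSession, const char *pszName, uint64_t *pu64Value)
{
    GCFGMVALUEREQ Req;
    RT_ZERO(Req);
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq    = sizeof(Req);
    Req.pSession     = pSession;
    RTStrCopy(Req.szName, sizeof(Req.szName), pszName);

    int rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GCFGM_QUERY_VALUE, 0 /*u64Arg*/, &Req.Hdr);
    if (RT_SUCCESS(rc))
        *pu64Value = Req.u64Value;
    return rc;
}
#endif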
1416
1417/**
 1418 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1419 * called thru a longjmp so we can exit safely on failure.
1420 *
1421 * @returns VBox status code.
1422 * @param pGVM The global (ring-0) VM structure.
1423 * @param pVM The cross context VM structure.
1424 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 1425 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1426 * @param enmOperation Which operation to execute.
1427 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1428 * The support driver validates this if it's present.
1429 * @param u64Arg Some simple constant argument.
1430 * @param pSession The session of the caller.
1431 *
1432 * @remarks Assume called with interrupts _enabled_.
1433 */
1434static int vmmR0EntryExWorker(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1435 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1436{
1437 /*
1438 * Validate pGVM, pVM and idCpu for consistency and validity.
1439 */
1440 if ( pGVM != NULL
1441 || pVM != NULL)
1442 {
1443 if (RT_LIKELY( RT_VALID_PTR(pGVM)
1444 && RT_VALID_PTR(pVM)
1445 && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0))
1446 { /* likely */ }
1447 else
1448 {
1449 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p and/or pVM=%p! (op=%d)\n", pGVM, pVM, enmOperation);
1450 return VERR_INVALID_POINTER;
1451 }
1452
1453 if (RT_LIKELY(pGVM->pVM == pVM))
1454 { /* likely */ }
1455 else
1456 {
1457 SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM->pVM=%p\n", pVM, pGVM->pVM);
1458 return VERR_INVALID_PARAMETER;
1459 }
1460
1461 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1462 { /* likely */ }
1463 else
1464 {
1465 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1466 return VERR_INVALID_PARAMETER;
1467 }
1468
1469 if (RT_LIKELY( pVM->enmVMState >= VMSTATE_CREATING
1470 && pVM->enmVMState <= VMSTATE_TERMINATED
1471 && pVM->cCpus == pGVM->cCpus
1472 && pVM->pSession == pSession
1473 && pVM->pVMR0 == pVM))
1474 { /* likely */ }
1475 else
1476 {
1477 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{.enmVMState=%d, .cCpus=%#x(==%#x), .pSession=%p(==%p), .pVMR0=%p(==%p)}! (op=%d)\n",
1478 pVM, pVM->enmVMState, pVM->cCpus, pGVM->cCpus, pVM->pSession, pSession, pVM->pVMR0, pVM, enmOperation);
1479 return VERR_INVALID_POINTER;
1480 }
1481 }
1482 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1483 { /* likely */ }
1484 else
1485 {
1486 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1487 return VERR_INVALID_PARAMETER;
1488 }
1489
1490 /*
1491 * SMAP fun.
1492 */
1493 VMM_CHECK_SMAP_SETUP();
1494 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1495
1496 /*
1497 * Process the request.
1498 */
1499 int rc;
1500 switch (enmOperation)
1501 {
1502 /*
1503 * GVM requests
1504 */
1505 case VMMR0_DO_GVMM_CREATE_VM:
1506 if (pGVM == NULL && pVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1507 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1508 else
1509 rc = VERR_INVALID_PARAMETER;
1510 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1511 break;
1512
1513 case VMMR0_DO_GVMM_DESTROY_VM:
1514 if (pReqHdr == NULL && u64Arg == 0)
1515 rc = GVMMR0DestroyVM(pGVM, pVM);
1516 else
1517 rc = VERR_INVALID_PARAMETER;
1518 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1519 break;
1520
1521 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1522 if (pGVM != NULL && pVM != NULL)
1523 rc = GVMMR0RegisterVCpu(pGVM, pVM, idCpu);
1524 else
1525 rc = VERR_INVALID_PARAMETER;
1526 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1527 break;
1528
1529 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1530 if (pGVM != NULL && pVM != NULL)
1531 rc = GVMMR0DeregisterVCpu(pGVM, pVM, idCpu);
1532 else
1533 rc = VERR_INVALID_PARAMETER;
1534 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1535 break;
1536
1537 case VMMR0_DO_GVMM_SCHED_HALT:
1538 if (pReqHdr)
1539 return VERR_INVALID_PARAMETER;
1540 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1541 rc = GVMMR0SchedHalt(pGVM, pVM, idCpu, u64Arg);
1542 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1543 break;
1544
1545 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1546 if (pReqHdr || u64Arg)
1547 return VERR_INVALID_PARAMETER;
1548 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1549 rc = GVMMR0SchedWakeUp(pGVM, pVM, idCpu);
1550 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1551 break;
1552
1553 case VMMR0_DO_GVMM_SCHED_POKE:
1554 if (pReqHdr || u64Arg)
1555 return VERR_INVALID_PARAMETER;
1556 rc = GVMMR0SchedPoke(pGVM, pVM, idCpu);
1557 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1558 break;
1559
1560 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1561 if (u64Arg)
1562 return VERR_INVALID_PARAMETER;
1563 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1564 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1565 break;
1566
1567 case VMMR0_DO_GVMM_SCHED_POLL:
1568 if (pReqHdr || u64Arg > 1)
1569 return VERR_INVALID_PARAMETER;
1570 rc = GVMMR0SchedPoll(pGVM, pVM, idCpu, !!u64Arg);
1571 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1572 break;
1573
1574 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1575 if (u64Arg)
1576 return VERR_INVALID_PARAMETER;
1577 rc = GVMMR0QueryStatisticsReq(pGVM, pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1578 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1579 break;
1580
1581 case VMMR0_DO_GVMM_RESET_STATISTICS:
1582 if (u64Arg)
1583 return VERR_INVALID_PARAMETER;
1584 rc = GVMMR0ResetStatisticsReq(pGVM, pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1585 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1586 break;
1587
1588 /*
1589 * Initialize the R0 part of a VM instance.
1590 */
1591 case VMMR0_DO_VMMR0_INIT:
1592 rc = vmmR0InitVM(pGVM, pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1593 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1594 break;
1595
1596 /*
1597 * Does EMT specific ring-0 init.
1598 */
1599 case VMMR0_DO_VMMR0_INIT_EMT:
1600 rc = vmmR0InitVMEmt(pGVM, pVM, idCpu);
1601 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1602 break;
1603
1604 /*
1605 * Terminate the R0 part of a VM instance.
1606 */
1607 case VMMR0_DO_VMMR0_TERM:
1608 rc = VMMR0TermVM(pGVM, pVM, 0 /*idCpu*/);
1609 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1610 break;
1611
1612 /*
 1613 * Attempt to enable HM mode and check the current setting.
1614 */
1615 case VMMR0_DO_HM_ENABLE:
1616 rc = HMR0EnableAllCpus(pVM);
1617 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1618 break;
1619
1620 /*
1621 * Setup the hardware accelerated session.
1622 */
1623 case VMMR0_DO_HM_SETUP_VM:
1624 rc = HMR0SetupVM(pVM);
1625 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1626 break;
1627
1628 /*
1629 * Switch to RC to execute Hypervisor function.
1630 */
1631 case VMMR0_DO_CALL_HYPERVISOR:
1632 {
1633#ifdef VBOX_WITH_RAW_MODE
1634 /*
1635 * Validate input / context.
1636 */
1637 if (RT_UNLIKELY(idCpu != 0))
1638 return VERR_INVALID_CPU_ID;
1639 if (RT_UNLIKELY(pVM->cCpus != 1))
1640 return VERR_INVALID_PARAMETER;
1641 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1642# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1643 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1644 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1645# endif
1646 if (RT_FAILURE(g_rcRawModeUsability))
1647 return g_rcRawModeUsability;
1648
1649 /*
1650 * Disable interrupts.
1651 */
1652 RTCCUINTREG fFlags = ASMIntDisableFlags();
1653
1654 /*
1655 * Get the host CPU identifiers, make sure they are valid and that
1656 * we've got a TSC delta for the CPU.
1657 */
1658 RTCPUID idHostCpu;
1659 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1660 if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
1661 {
1662 ASMSetFlags(fFlags);
1663 return VERR_INVALID_CPU_INDEX;
1664 }
1665 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1666 {
1667 ASMSetFlags(fFlags);
1668 rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1669 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1670 0 /*default cTries*/);
1671 if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
1672 {
1673 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1674 return rc;
1675 }
1676 }
1677
1678 /*
1679 * Commit the CPU identifiers.
1680 */
1681# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1682 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1683# endif
1684 pVCpu->iHostCpuSet = iHostCpuSet;
1685 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1686
1687 /*
1688 * We might need to disable VT-x if the active switcher turns off paging.
1689 */
1690 bool fVTxDisabled;
1691 rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1692 if (RT_SUCCESS(rc))
1693 {
1694 /*
1695 * Go through the wormhole...
1696 */
1697 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1698
1699 /*
1700 * Re-enable VT-x before we dispatch any pending host interrupts.
1701 */
1702 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1703
1704 if ( rc == VINF_EM_RAW_INTERRUPT
1705 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1706 TRPMR0DispatchHostInterrupt(pVM);
1707 }
1708
1709 /*
1710 * Invalidate the host CPU identifiers as we restore interrupts.
1711 */
1712 pVCpu->iHostCpuSet = UINT32_MAX;
1713 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1714 ASMSetFlags(fFlags);
1715
1716#else /* !VBOX_WITH_RAW_MODE */
1717 rc = VERR_RAW_MODE_NOT_SUPPORTED;
1718#endif
1719 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1720 break;
1721 }
1722
1723 /*
1724 * PGM wrappers.
1725 */
1726 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1727 if (idCpu == NIL_VMCPUID)
1728 return VERR_INVALID_CPU_ID;
1729 rc = PGMR0PhysAllocateHandyPages(pGVM, pVM, idCpu);
1730 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1731 break;
1732
1733 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1734 if (idCpu == NIL_VMCPUID)
1735 return VERR_INVALID_CPU_ID;
1736 rc = PGMR0PhysFlushHandyPages(pGVM, pVM, idCpu);
1737 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1738 break;
1739
1740 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1741 if (idCpu == NIL_VMCPUID)
1742 return VERR_INVALID_CPU_ID;
1743 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, pVM, idCpu);
1744 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1745 break;
1746
1747 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1748 if (idCpu != 0)
1749 return VERR_INVALID_CPU_ID;
1750 rc = PGMR0PhysSetupIoMmu(pGVM, pVM);
1751 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1752 break;
1753
1754 /*
1755 * GMM wrappers.
1756 */
1757 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1758 if (u64Arg)
1759 return VERR_INVALID_PARAMETER;
1760 rc = GMMR0InitialReservationReq(pGVM, pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1761 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1762 break;
1763
1764 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1765 if (u64Arg)
1766 return VERR_INVALID_PARAMETER;
1767 rc = GMMR0UpdateReservationReq(pGVM, pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1768 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1769 break;
1770
1771 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1772 if (u64Arg)
1773 return VERR_INVALID_PARAMETER;
1774 rc = GMMR0AllocatePagesReq(pGVM, pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1775 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1776 break;
1777
1778 case VMMR0_DO_GMM_FREE_PAGES:
1779 if (u64Arg)
1780 return VERR_INVALID_PARAMETER;
1781 rc = GMMR0FreePagesReq(pGVM, pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1782 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1783 break;
1784
1785 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1786 if (u64Arg)
1787 return VERR_INVALID_PARAMETER;
1788 rc = GMMR0FreeLargePageReq(pGVM, pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1789 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1790 break;
1791
1792 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1793 if (u64Arg)
1794 return VERR_INVALID_PARAMETER;
1795 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1796 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1797 break;
1798
1799 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1800 if (idCpu == NIL_VMCPUID)
1801 return VERR_INVALID_CPU_ID;
1802 if (u64Arg)
1803 return VERR_INVALID_PARAMETER;
1804 rc = GMMR0QueryMemoryStatsReq(pGVM, pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1805 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1806 break;
1807
1808 case VMMR0_DO_GMM_BALLOONED_PAGES:
1809 if (u64Arg)
1810 return VERR_INVALID_PARAMETER;
1811 rc = GMMR0BalloonedPagesReq(pGVM, pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1812 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1813 break;
1814
1815 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1816 if (u64Arg)
1817 return VERR_INVALID_PARAMETER;
1818 rc = GMMR0MapUnmapChunkReq(pGVM, pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1819 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1820 break;
1821
1822 case VMMR0_DO_GMM_SEED_CHUNK:
1823 if (pReqHdr)
1824 return VERR_INVALID_PARAMETER;
1825 rc = GMMR0SeedChunk(pGVM, pVM, idCpu, (RTR3PTR)u64Arg);
1826 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1827 break;
1828
1829 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1830 if (idCpu == NIL_VMCPUID)
1831 return VERR_INVALID_CPU_ID;
1832 if (u64Arg)
1833 return VERR_INVALID_PARAMETER;
1834 rc = GMMR0RegisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1835 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1836 break;
1837
1838 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1839 if (idCpu == NIL_VMCPUID)
1840 return VERR_INVALID_CPU_ID;
1841 if (u64Arg)
1842 return VERR_INVALID_PARAMETER;
1843 rc = GMMR0UnregisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1844 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1845 break;
1846
1847 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1848 if (idCpu == NIL_VMCPUID)
1849 return VERR_INVALID_CPU_ID;
1850 if ( u64Arg
1851 || pReqHdr)
1852 return VERR_INVALID_PARAMETER;
1853 rc = GMMR0ResetSharedModules(pGVM, pVM, idCpu);
1854 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1855 break;
1856
1857#ifdef VBOX_WITH_PAGE_SHARING
1858 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1859 {
1860 if (idCpu == NIL_VMCPUID)
1861 return VERR_INVALID_CPU_ID;
1862 if ( u64Arg
1863 || pReqHdr)
1864 return VERR_INVALID_PARAMETER;
1865 rc = GMMR0CheckSharedModules(pGVM, pVM, idCpu);
1866 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1867 break;
1868 }
1869#endif
1870
1871#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1872 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1873 if (u64Arg)
1874 return VERR_INVALID_PARAMETER;
1875 rc = GMMR0FindDuplicatePageReq(pGVM, pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1876 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1877 break;
1878#endif
1879
1880 case VMMR0_DO_GMM_QUERY_STATISTICS:
1881 if (u64Arg)
1882 return VERR_INVALID_PARAMETER;
1883 rc = GMMR0QueryStatisticsReq(pGVM, pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1884 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1885 break;
1886
1887 case VMMR0_DO_GMM_RESET_STATISTICS:
1888 if (u64Arg)
1889 return VERR_INVALID_PARAMETER;
1890 rc = GMMR0ResetStatisticsReq(pGVM, pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1891 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1892 break;
1893
1894 /*
1895 * A quick GCFGM mock-up.
1896 */
1897 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1898 case VMMR0_DO_GCFGM_SET_VALUE:
1899 case VMMR0_DO_GCFGM_QUERY_VALUE:
1900 {
1901 if (pGVM || pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1902 return VERR_INVALID_PARAMETER;
1903 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1904 if (pReq->Hdr.cbReq != sizeof(*pReq))
1905 return VERR_INVALID_PARAMETER;
1906 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1907 {
1908 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1909 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1910 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1911 }
1912 else
1913 {
1914 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1915 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1916 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1917 }
1918 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1919 break;
1920 }
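
/*
 * Editorial note (not part of the original source): a minimal ring-3 sketch of
 * driving the GCFGM mock-up handled above.  It only relies on the fields the
 * handler dereferences (Hdr.cbReq, pSession, szName, u64Value); the header
 * magic constant and the SUPR3CallVMMR0Ex signature are assumptions based on
 * the support library conventions used elsewhere in this file, and the helper
 * name is hypothetical.
 */
#if 0 /* illustrative only */
static int vmmSketchQueryGCfgmValue(PSUPDRVSESSION pSession, const char *pszName, uint64_t *pu64Value)
{
    GCFGMVALUEREQ Req;
    RT_ZERO(Req);
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;    /* assumed header magic */
    Req.Hdr.cbReq    = sizeof(Req);             /* checked by the handler above */
    Req.pSession     = pSession;
    int rc = RTStrCopy(Req.szName, sizeof(Req.szName), pszName);
    if (RT_SUCCESS(rc))
    {
        /* The GCFGM cases require a NIL VM handle and NIL_VMCPUID. */
        rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GCFGM_QUERY_VALUE, 0, &Req.Hdr);
        if (RT_SUCCESS(rc))
            *pu64Value = Req.u64Value;
    }
    return rc;
}
#endif
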
1921
1922 /*
1923 * PDM Wrappers.
1924 */
1925 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1926 {
1927 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1928 return VERR_INVALID_PARAMETER;
1929 rc = PDMR0DriverCallReqHandler(pGVM, pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1930 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1931 break;
1932 }
1933
1934 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1935 {
1936 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1937 return VERR_INVALID_PARAMETER;
1938 rc = PDMR0DeviceCallReqHandler(pGVM, pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1939 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1940 break;
1941 }
1942
1943 /*
1944 * Requests to the internal networking service.
1945 */
1946 case VMMR0_DO_INTNET_OPEN:
1947 {
1948 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
1949 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
1950 return VERR_INVALID_PARAMETER;
1951 rc = IntNetR0OpenReq(pSession, pReq);
1952 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1953 break;
1954 }
1955
1956 case VMMR0_DO_INTNET_IF_CLOSE:
1957 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1958 return VERR_INVALID_PARAMETER;
1959 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
1960 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1961 break;
1962
1963
1964 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
1965 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1966 return VERR_INVALID_PARAMETER;
1967 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
1968 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1969 break;
1970
1971 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
1972 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1973 return VERR_INVALID_PARAMETER;
1974 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
1975 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1976 break;
1977
1978 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
1979 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1980 return VERR_INVALID_PARAMETER;
1981 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
1982 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1983 break;
1984
1985 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
1986 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1987 return VERR_INVALID_PARAMETER;
1988 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
1989 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1990 break;
1991
1992 case VMMR0_DO_INTNET_IF_SEND:
1993 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1994 return VERR_INVALID_PARAMETER;
1995 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
1996 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1997 break;
1998
1999 case VMMR0_DO_INTNET_IF_WAIT:
2000 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2001 return VERR_INVALID_PARAMETER;
2002 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2003 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2004 break;
2005
2006 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2007 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2008 return VERR_INVALID_PARAMETER;
2009 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2010 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2011 break;
2012
2013#ifdef VBOX_WITH_PCI_PASSTHROUGH
2014 /*
2015 * Requests to host PCI driver service.
2016 */
2017 case VMMR0_DO_PCIRAW_REQ:
2018 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2019 return VERR_INVALID_PARAMETER;
2020 rc = PciRawR0ProcessReq(pGVM, pVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2021 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2022 break;
2023#endif
2024
2025 /*
2026 * NEM requests.
2027 */
2028#ifdef VBOX_WITH_NEM_R0
2029# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2030 case VMMR0_DO_NEM_INIT_VM:
2031 if (u64Arg || pReqHdr || idCpu != 0)
2032 return VERR_INVALID_PARAMETER;
2033 rc = NEMR0InitVM(pGVM, pVM);
2034 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2035 break;
2036
2037 case VMMR0_DO_NEM_INIT_VM_PART_2:
2038 if (u64Arg || pReqHdr || idCpu != 0)
2039 return VERR_INVALID_PARAMETER;
2040 rc = NEMR0InitVMPart2(pGVM, pVM);
2041 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2042 break;
2043
2044 case VMMR0_DO_NEM_MAP_PAGES:
2045 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2046 return VERR_INVALID_PARAMETER;
2047 rc = NEMR0MapPages(pGVM, pVM, idCpu);
2048 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2049 break;
2050
2051 case VMMR0_DO_NEM_UNMAP_PAGES:
2052 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2053 return VERR_INVALID_PARAMETER;
2054 rc = NEMR0UnmapPages(pGVM, pVM, idCpu);
2055 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2056 break;
2057
2058 case VMMR0_DO_NEM_EXPORT_STATE:
2059 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2060 return VERR_INVALID_PARAMETER;
2061 rc = NEMR0ExportState(pGVM, pVM, idCpu);
2062 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2063 break;
2064
2065 case VMMR0_DO_NEM_IMPORT_STATE:
2066 if (pReqHdr || idCpu == NIL_VMCPUID)
2067 return VERR_INVALID_PARAMETER;
2068 rc = NEMR0ImportState(pGVM, pVM, idCpu, u64Arg);
2069 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2070 break;
2071
2072 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2073 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2074 return VERR_INVALID_PARAMETER;
2075 rc = NEMR0QueryCpuTick(pGVM, pVM, idCpu);
2076 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2077 break;
2078
2079 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2080 if (pReqHdr || idCpu == NIL_VMCPUID)
2081 return VERR_INVALID_PARAMETER;
2082 rc = NEMR0ResumeCpuTickOnAll(pGVM, pVM, idCpu, u64Arg);
2083 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2084 break;
2085
2086 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2087 if (u64Arg || pReqHdr)
2088 return VERR_INVALID_PARAMETER;
2089 rc = NEMR0UpdateStatistics(pGVM, pVM, idCpu);
2090 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2091 break;
2092
2093# if 1 && defined(DEBUG_bird)
2094 case VMMR0_DO_NEM_EXPERIMENT:
2095 if (pReqHdr)
2096 return VERR_INVALID_PARAMETER;
2097 rc = NEMR0DoExperiment(pGVM, pVM, idCpu, u64Arg);
2098 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2099 break;
2100# endif
2101# endif
2102#endif
2103
2104 /*
2105 * For profiling.
2106 */
2107 case VMMR0_DO_NOP:
2108 case VMMR0_DO_SLOW_NOP:
2109 return VINF_SUCCESS;
2110
2111 /*
2112 * For testing Ring-0 APIs invoked in this environment.
2113 */
2114 case VMMR0_DO_TESTS:
2115 /** @todo make new test */
2116 return VINF_SUCCESS;
2117
2118
2119#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
2120 case VMMR0_DO_TEST_SWITCHER3264:
2121 if (idCpu == NIL_VMCPUID)
2122 return VERR_INVALID_CPU_ID;
2123 rc = HMR0TestSwitcher3264(pVM);
2124 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2125 break;
2126#endif
2127 default:
2128 /*
2129 * We're returning VERR_NOT_SUPPORTED here so we've got something other
2130 * than -1, which the interrupt gate glue code might return.
2131 */
2132 Log(("operation %#x is not supported\n", enmOperation));
2133 return VERR_NOT_SUPPORTED;
2134 }
2135 return rc;
2136}
2137
2138
2139/**
2140 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2141 */
2142typedef struct VMMR0ENTRYEXARGS
2143{
2144 PGVM pGVM;
2145 PVM pVM;
2146 VMCPUID idCpu;
2147 VMMR0OPERATION enmOperation;
2148 PSUPVMMR0REQHDR pReq;
2149 uint64_t u64Arg;
2150 PSUPDRVSESSION pSession;
2151} VMMR0ENTRYEXARGS;
2152/** Pointer to a vmmR0EntryExWrapper argument package. */
2153typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
2154
2155/**
2156 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2157 *
2158 * @returns VBox status code.
2159 * @param pvArgs The argument package
2160 */
2161static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2162{
2163 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2164 ((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
2165 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2166 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2167 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2168 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2169 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2170}
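
/*
 * Editorial note (not part of the original source): the wrapper above is the
 * usual "argument package + trampoline" pattern for funnelling a typed call
 * through a callback that only takes a single void pointer (here the one
 * handed to vmmR0CallRing3SetJmpEx).  A self-contained sketch of the same idea
 * with hypothetical names:
 */
#if 0 /* illustrative only */
typedef struct DEMOARGS { int a; int b; } DEMOARGS;

static DECLCALLBACK(int) demoWorkerWrapper(void *pvArgs)
{
    DEMOARGS *pArgs = (DEMOARGS *)pvArgs;
    return pArgs->a + pArgs->b;      /* real code would call the typed worker */
}

static int demoCallViaTrampoline(int a, int b)
{
    DEMOARGS Args = { a, b };
    /* Anything taking (callback, void *) can now reach the typed arguments. */
    return demoWorkerWrapper(&Args);
}
#endif
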
2171
2172
2173/**
2174 * The Ring 0 entry point, called by the support library (SUP).
2175 *
2176 * @returns VBox status code.
2177 * @param pGVM The global (ring-0) VM structure.
2178 * @param pVM The cross context VM structure.
2179 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2180 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2181 * @param enmOperation Which operation to execute.
2182 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2183 * @param u64Arg Some simple constant argument.
2184 * @param pSession The session of the caller.
2185 * @remarks Assume called with interrupts _enabled_.
2186 */
2187VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2188 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2189{
2190 /*
2191 * Requests that should only happen on the EMT thread will be
2192 * wrapped in a setjmp so we can assert without causing trouble.
2193 */
2194 if ( pVM != NULL
2195 && pGVM != NULL
2196 && idCpu < pGVM->cCpus
2197 && pVM->pVMR0 != NULL)
2198 {
2199 switch (enmOperation)
2200 {
2201 /* These might/will be called before VMMR3Init. */
2202 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2203 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2204 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2205 case VMMR0_DO_GMM_FREE_PAGES:
2206 case VMMR0_DO_GMM_BALLOONED_PAGES:
2207 /* On the mac we might not have a valid jmp buf, so check these as well. */
2208 case VMMR0_DO_VMMR0_INIT:
2209 case VMMR0_DO_VMMR0_TERM:
2210 {
2211 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2212 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2213 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2214 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2215 && pVCpu->hNativeThreadR0 == hNativeThread))
2216 {
2217 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2218 break;
2219
2220 /** @todo validate this EMT claim... GVM knows. */
2221 VMMR0ENTRYEXARGS Args;
2222 Args.pGVM = pGVM;
2223 Args.pVM = pVM;
2224 Args.idCpu = idCpu;
2225 Args.enmOperation = enmOperation;
2226 Args.pReq = pReq;
2227 Args.u64Arg = u64Arg;
2228 Args.pSession = pSession;
2229 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2230 }
2231 return VERR_VM_THREAD_NOT_EMT;
2232 }
2233
2234 default:
2235 break;
2236 }
2237 }
2238 return vmmR0EntryExWorker(pGVM, pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2239}
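
/*
 * Editorial note (not part of the original source): the EMT branch above arms
 * the ring-3 call jump buffer so that code deeper in ring-0 (see the
 * VMMRZCallRing3 uses below) can bail out to this entry point without a normal
 * unwind.  The control flow has the same shape as plain setjmp/longjmp; a
 * standalone sketch with hypothetical names, using the standard C facility
 * rather than the VMM's own implementation:
 */
#if 0 /* illustrative only */
#include <setjmp.h>

static jmp_buf g_DemoJmpBuf;

static void demoDeepWork(void)
{
    /* ... something requires ring-3 service; escape straight back ... */
    longjmp(g_DemoJmpBuf, 1);
}

static int demoEntry(void)
{
    if (setjmp(g_DemoJmpBuf) != 0)
        return -1;                   /* resumed here after the long jump */
    demoDeepWork();
    return 0;                        /* not reached in this sketch */
}
#endif
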
2240
2241
2242/**
2243 * Checks whether we've armed the ring-0 long jump machinery.
2244 *
2245 * @returns @c true / @c false
2246 * @param pVCpu The cross context virtual CPU structure.
2247 * @thread EMT
2248 * @sa VMMIsLongJumpArmed
2249 */
2250VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
2251{
2252#ifdef RT_ARCH_X86
2253 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2254 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2255#else
2256 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2257 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2258#endif
2259}
2260
2261
2262/**
2263 * Checks whether we've done a ring-3 long jump.
2264 *
2265 * @returns @c true / @c false
2266 * @param pVCpu The cross context virtual CPU structure.
2267 * @thread EMT
2268 */
2269VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
2270{
2271 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2272}
2273
2274
2275/**
2276 * Internal R0 logger worker: Flush logger.
2277 *
2278 * @param pLogger The logger instance to flush.
2279 * @remark This function must be exported!
2280 */
2281VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2282{
2283#ifdef LOG_ENABLED
2284 /*
2285 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2286 * (This code is a bit paranoid.)
2287 */
2288 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2289 if ( !VALID_PTR(pR0Logger)
2290 || !VALID_PTR(pR0Logger + 1)
2291 || pLogger->u32Magic != RTLOGGER_MAGIC)
2292 {
2293# ifdef DEBUG
2294 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2295# endif
2296 return;
2297 }
2298 if (pR0Logger->fFlushingDisabled)
2299 return; /* quietly */
2300
2301 PVM pVM = pR0Logger->pVM;
2302 if ( !VALID_PTR(pVM)
2303 || pVM->pVMR0 != pVM)
2304 {
2305# ifdef DEBUG
2306 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
2307# endif
2308 return;
2309 }
2310
2311 PVMCPU pVCpu = VMMGetCpu(pVM);
2312 if (pVCpu)
2313 {
2314 /*
2315 * Check that the jump buffer is armed.
2316 */
2317# ifdef RT_ARCH_X86
2318 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2319 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2320# else
2321 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2322 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2323# endif
2324 {
2325# ifdef DEBUG
2326 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2327# endif
2328 return;
2329 }
2330 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2331 }
2332# ifdef DEBUG
2333 else
2334 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2335# endif
2336#else
2337 NOREF(pLogger);
2338#endif /* LOG_ENABLED */
2339}
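
/*
 * Editorial note (not part of the original source): the cast at the top of
 * vmmR0LoggerFlush recovers the containing VMMR0LOGGER from a pointer to its
 * embedded RTLOGGER member, i.e. the usual "container_of" trick.  The same
 * arithmetic in isolation, with hypothetical types:
 */
#if 0 /* illustrative only */
typedef struct DEMOINNER { int x; } DEMOINNER;
typedef struct DEMOOUTER
{
    unsigned  uExtra;
    DEMOINNER Inner;
} DEMOOUTER;

static DEMOOUTER *demoOuterFromInner(DEMOINNER *pInner)
{
    /* Subtract the member offset to get back to the start of the container. */
    return (DEMOOUTER *)((uintptr_t)pInner - RT_OFFSETOF(DEMOOUTER, Inner));
}
#endif
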
2340
2341/**
2342 * Internal R0 logger worker: Custom prefix.
2343 *
2344 * @returns Number of chars written.
2345 *
2346 * @param pLogger The logger instance.
2347 * @param pchBuf The output buffer.
2348 * @param cchBuf The size of the buffer.
2349 * @param pvUser User argument (ignored).
2350 */
2351VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
2352{
2353 NOREF(pvUser);
2354#ifdef LOG_ENABLED
2355 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2356 if ( !VALID_PTR(pR0Logger)
2357 || !VALID_PTR(pR0Logger + 1)
2358 || pLogger->u32Magic != RTLOGGER_MAGIC
2359 || cchBuf < 2)
2360 return 0;
2361
2362 static const char s_szHex[17] = "0123456789abcdef";
2363 VMCPUID const idCpu = pR0Logger->idCpu;
2364 pchBuf[1] = s_szHex[ idCpu & 15];
2365 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
2366
2367 return 2;
2368#else
2369 NOREF(pLogger); NOREF(pchBuf); NOREF(cchBuf);
2370 return 0;
2371#endif
2372}
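
/*
 * Editorial note (not part of the original source): the prefix callback above
 * emits the virtual CPU id as exactly two lowercase hex digits, so log lines
 * from VCPU 0x1f carry the prefix "1f".  The same conversion on its own, with
 * a hypothetical helper name:
 */
#if 0 /* illustrative only */
static void demoFormatCpuPrefix(uint32_t idCpu, char pchBuf[2])
{
    static const char s_szHex[17] = "0123456789abcdef";
    pchBuf[0] = s_szHex[(idCpu >> 4) & 15];   /* high nibble first */
    pchBuf[1] = s_szHex[ idCpu       & 15];   /* low nibble second */
}
#endif
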
2373
2374#ifdef LOG_ENABLED
2375
2376/**
2377 * Disables flushing of the ring-0 debug log.
2378 *
2379 * @param pVCpu The cross context virtual CPU structure.
2380 */
2381VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
2382{
2383 if (pVCpu->vmm.s.pR0LoggerR0)
2384 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2385}
2386
2387
2388/**
2389 * Enables flushing of the ring-0 debug log.
2390 *
2391 * @param pVCpu The cross context virtual CPU structure.
2392 */
2393VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
2394{
2395 if (pVCpu->vmm.s.pR0LoggerR0)
2396 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2397}
2398
2399
2400/**
2401 * Checks if log flushing is disabled or not.
2402 *
2403 * @param pVCpu The cross context virtual CPU structure.
2404 */
2405VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
2406{
2407 if (pVCpu->vmm.s.pR0LoggerR0)
2408 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2409 return true;
2410}
2411#endif /* LOG_ENABLED */
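
/*
 * Editorial note (not part of the original source): a hypothetical call site
 * for the enable/disable pair above (LOG_ENABLED builds), bracketing a region
 * where a ring-3 round trip for log flushing would be unwanted.
 */
#if 0 /* illustrative only */
static void demoDoSomethingWithoutLogFlushing(PVMCPU pVCpu)
{
    VMMR0LogFlushDisable(pVCpu);
    /* ... work that must not trigger VMMCALLRING3_VMM_LOGGER_FLUSH ... */
    VMMR0LogFlushEnable(pVCpu);
}
#endif
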
2412
2413/**
2414 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2415 *
2416 * @returns true if the breakpoint should be hit, false if it should be ignored.
2417 */
2418DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2419{
2420#if 0
2421 return true;
2422#else
2423 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2424 if (pVM)
2425 {
2426 PVMCPU pVCpu = VMMGetCpu(pVM);
2427
2428 if (pVCpu)
2429 {
2430#ifdef RT_ARCH_X86
2431 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2432 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2433#else
2434 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2435 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2436#endif
2437 {
2438 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2439 return RT_FAILURE_NP(rc);
2440 }
2441 }
2442 }
2443#ifdef RT_OS_LINUX
2444 return true;
2445#else
2446 return false;
2447#endif
2448#endif
2449}
2450
2451
2452/**
2453 * Override this so we can push it up to ring-3.
2454 *
2455 * @param pszExpr Expression. Can be NULL.
2456 * @param uLine Location line number.
2457 * @param pszFile Location file name.
2458 * @param pszFunction Location function name.
2459 */
2460DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2461{
2462 /*
2463 * To the log.
2464 */
2465 LogAlways(("\n!!R0-Assertion Failed!!\n"
2466 "Expression: %s\n"
2467 "Location : %s(%d) %s\n",
2468 pszExpr, pszFile, uLine, pszFunction));
2469
2470 /*
2471 * To the global VMM buffer.
2472 */
2473 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2474 if (pVM)
2475 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2476 "\n!!R0-Assertion Failed!!\n"
2477 "Expression: %.*s\n"
2478 "Location : %s(%d) %s\n",
2479 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2480 pszFile, uLine, pszFunction);
2481
2482 /*
2483 * Continue the normal way.
2484 */
2485 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2486}
2487
2488
2489/**
2490 * Callback for RTLogFormatV which writes to the ring-3 log port.
2491 * See PFNLOGOUTPUT() for details.
2492 */
2493static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2494{
2495 for (size_t i = 0; i < cbChars; i++)
2496 {
2497 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2498 }
2499
2500 NOREF(pv);
2501 return cbChars;
2502}
2503
2504
2505/**
2506 * Override this so we can push it up to ring-3.
2507 *
2508 * @param pszFormat The format string.
2509 * @param va Arguments.
2510 */
2511DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2512{
2513 va_list vaCopy;
2514
2515 /*
2516 * Push the message to the loggers.
2517 */
2518 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2519 if (pLog)
2520 {
2521 va_copy(vaCopy, va);
2522 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2523 va_end(vaCopy);
2524 }
2525 pLog = RTLogRelGetDefaultInstance();
2526 if (pLog)
2527 {
2528 va_copy(vaCopy, va);
2529 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2530 va_end(vaCopy);
2531 }
2532
2533 /*
2534 * Push it to the global VMM buffer.
2535 */
2536 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2537 if (pVM)
2538 {
2539 va_copy(vaCopy, va);
2540 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2541 va_end(vaCopy);
2542 }
2543
2544 /*
2545 * Continue the normal way.
2546 */
2547 RTAssertMsg2V(pszFormat, va);
2548}
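
/*
 * Editorial note (not part of the original source): the va_copy calls above are
 * needed because a va_list may only be traversed once; each consumer (debug
 * logger, release logger, assertion buffer) gets its own copy while the
 * original is kept intact for RTAssertMsg2V.  A minimal standalone sketch of
 * the pattern, with a hypothetical function name:
 */
#if 0 /* illustrative only */
#include <stdarg.h>
#include <stdio.h>

static void demoLogTwice(const char *pszFormat, ...)
{
    va_list va;
    va_list vaCopy;
    va_start(va, pszFormat);

    va_copy(vaCopy, va);            /* the first consumer uses the copy... */
    vprintf(pszFormat, vaCopy);
    va_end(vaCopy);

    vprintf(pszFormat, va);         /* ...so the original is still usable here */
    va_end(va);
}
#endif
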
2549