VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 90161

Last change on this file since 90161 was 90161, checked in by vboxsync, 4 years ago

VMM: New status VERR_VMM_CONTEXT_HOOK_STILL_ENABLED for guru condition in VMMR0. bugref:10064 ticketref:20090

1/* $Id: VMMR0.cpp 90161 2021-07-12 23:08:00Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_NEM_R0
31# include <VBox/vmm/nem.h>
32#endif
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/stam.h>
35#include <VBox/vmm/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvm.h>
39#ifdef VBOX_WITH_PCI_PASSTHROUGH
40# include <VBox/vmm/pdmpci.h>
41#endif
42#include <VBox/vmm/apic.h>
43
44#include <VBox/vmm/gvmm.h>
45#include <VBox/vmm/gmm.h>
46#include <VBox/vmm/gim.h>
47#include <VBox/intnet.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/param.h>
50#include <VBox/err.h>
51#include <VBox/version.h>
52#include <VBox/log.h>
53
54#include <iprt/asm-amd64-x86.h>
55#include <iprt/assert.h>
56#include <iprt/crc.h>
57#include <iprt/mp.h>
58#include <iprt/once.h>
59#include <iprt/stdarg.h>
60#include <iprt/string.h>
61#include <iprt/thread.h>
62#include <iprt/timer.h>
63#include <iprt/time.h>
64
65#include "dtrace/VBoxVMM.h"
66
67
68#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
69# pragma intrinsic(_AddressOfReturnAddress)
70#endif
71
72#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
73# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
74#endif
75
76
77
78/*********************************************************************************************************************************
79* Defined Constants And Macros *
80*********************************************************************************************************************************/
81/** @def VMM_CHECK_SMAP_SETUP
82 * SMAP check setup. */
83/** @def VMM_CHECK_SMAP_CHECK
84 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
85 * it will be logged and @a a_BadExpr is executed. */
86/** @def VMM_CHECK_SMAP_CHECK2
87 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
88 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
89 * executed. */
90#if (defined(VBOX_STRICT) || 1) && !defined(VBOX_WITH_RAM_IN_KERNEL)
91# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
92# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
93 do { \
94 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
95 { \
96 RTCCUINTREG fEflCheck = ASMGetFlags(); \
97 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
98 { /* likely */ } \
99 else \
100 { \
101 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
102 a_BadExpr; \
103 } \
104 } \
105 } while (0)
106# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
107 do { \
108 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
109 { \
110 RTCCUINTREG fEflCheck = ASMGetFlags(); \
111 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
112 { /* likely */ } \
113 else if (a_pGVM) \
114 { \
115 SUPR0BadContext((a_pGVM)->pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
116 RTStrPrintf((a_pGVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pGVM)->vmm.s.szRing0AssertMsg1), \
117 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
118 a_BadExpr; \
119 } \
120 else \
121 { \
122 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
123 a_BadExpr; \
124 } \
125 } \
126 } while (0)
127#else
128# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
129# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
130# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures)
131#endif
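/*
 * Illustrative sketch of how the macros above are meant to be used by a
 * ring-0 function: VMM_CHECK_SMAP_SETUP() declares the fKernelFeatures local
 * that the two check macros reference, so it must come first.  The function
 * below is a hypothetical example, not part of this file.
 */
#if 0 /* example only */
static int vmmR0ExampleWorker(PGVM pGVM)
{
    VMM_CHECK_SMAP_SETUP();                                       /* declares fKernelFeatures */
    VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);      /* bail out if EFLAGS.AC is clear */

    /* ... call code that might inadvertently clear EFLAGS.AC ... */

    int rc = VINF_SUCCESS;
    VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_SMAP_BUT_AC_CLEAR); /* also records it in the VM's assertion buffer */
    return rc;
}
#endif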
132
133
134/*********************************************************************************************************************************
135* Internal Functions *
136*********************************************************************************************************************************/
137RT_C_DECLS_BEGIN
138#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
139extern uint64_t __udivdi3(uint64_t, uint64_t);
140extern uint64_t __umoddi3(uint64_t, uint64_t);
141#endif
142RT_C_DECLS_END
143
144
145/*********************************************************************************************************************************
146* Global Variables *
147*********************************************************************************************************************************/
148/** Drag in necessary library bits.
149 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
150struct CLANG11WEIRDNOTHROW { PFNRT pfn; } g_VMMR0Deps[] =
151{
152 { (PFNRT)RTCrc32 },
153 { (PFNRT)RTOnce },
154#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
155 { (PFNRT)__udivdi3 },
156 { (PFNRT)__umoddi3 },
157#endif
158 { NULL }
159};
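/*
 * Illustrative sketch: the table above works because taking a function's
 * address forces the linker to pull in the object file defining it.  A
 * minimal form of the same drag-in idiom, with hypothetical names:
 */
#if 0 /* example only */
extern void SomeHelperNobodyCallsDirectly(void);
static PFNRT g_apfnForceLinkage[] =
{
    (PFNRT)SomeHelperNobodyCallsDirectly, /* referenced only to keep it linked */
    NULL
};
#endif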
160
161#ifdef RT_OS_SOLARIS
162/* Dependency information for the native Solaris loader. */
163extern "C" { char _depends_on[] = "vboxdrv"; }
164#endif
165
166
167/**
168 * Initialize the module.
169 * This is called when we're first loaded.
170 *
171 * @returns 0 on success.
172 * @returns VBox status code on failure.
173 * @param hMod Image handle for use in APIs.
174 */
175DECLEXPORT(int) ModuleInit(void *hMod)
176{
177 VMM_CHECK_SMAP_SETUP();
178 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
179
180#ifdef VBOX_WITH_DTRACE_R0
181 /*
182 * The first thing to do is register the static tracepoints.
183 * (Deregistration is automatic.)
184 */
185 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
186 if (RT_FAILURE(rc2))
187 return rc2;
188#endif
189 LogFlow(("ModuleInit:\n"));
190
191#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
192 /*
193 * Display the CMOS debug code.
194 */
195 ASMOutU8(0x72, 0x03);
196 uint8_t bDebugCode = ASMInU8(0x73);
197 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
198 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
199#endif
200
201 /*
202 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
203 */
204 int rc = vmmInitFormatTypes();
205 if (RT_SUCCESS(rc))
206 {
207 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
208 rc = GVMMR0Init();
209 if (RT_SUCCESS(rc))
210 {
211 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
212 rc = GMMR0Init();
213 if (RT_SUCCESS(rc))
214 {
215 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
216 rc = HMR0Init();
217 if (RT_SUCCESS(rc))
218 {
219 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
220
221 PDMR0Init(hMod);
222 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
223
224 rc = PGMRegisterStringFormatTypes();
225 if (RT_SUCCESS(rc))
226 {
227 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
228#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
229 rc = PGMR0DynMapInit();
230#endif
231 if (RT_SUCCESS(rc))
232 {
233 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
234 rc = IntNetR0Init();
235 if (RT_SUCCESS(rc))
236 {
237#ifdef VBOX_WITH_PCI_PASSTHROUGH
238 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
239 rc = PciRawR0Init();
240#endif
241 if (RT_SUCCESS(rc))
242 {
243 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
244 rc = CPUMR0ModuleInit();
245 if (RT_SUCCESS(rc))
246 {
247#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
248 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
249 rc = vmmR0TripleFaultHackInit();
250 if (RT_SUCCESS(rc))
251#endif
252 {
253 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
254 if (RT_SUCCESS(rc))
255 {
256 LogFlow(("ModuleInit: returns success\n"));
257 return VINF_SUCCESS;
258 }
259 }
260
261 /*
262 * Bail out.
263 */
264#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
265 vmmR0TripleFaultHackTerm();
266#endif
267 }
268 else
269 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
270#ifdef VBOX_WITH_PCI_PASSTHROUGH
271 PciRawR0Term();
272#endif
273 }
274 else
275 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
276 IntNetR0Term();
277 }
278 else
279 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
280#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
281 PGMR0DynMapTerm();
282#endif
283 }
284 else
285 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
286 PGMDeregisterStringFormatTypes();
287 }
288 else
289 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
290 HMR0Term();
291 }
292 else
293 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
294 GMMR0Term();
295 }
296 else
297 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
298 GVMMR0Term();
299 }
300 else
301 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
302 vmmTermFormatTypes();
303 }
304 else
305 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
306
307 LogFlow(("ModuleInit: failed %Rrc\n", rc));
308 return rc;
309}
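/*
 * Illustrative sketch: ModuleInit above follows the classic nested
 * init/unwind pattern, where a subsystem is only torn down if its init
 * succeeded and tear-down runs in reverse order of initialization.
 * In miniature (SubA/SubB are hypothetical):
 */
#if 0 /* example only */
int rc = SubAInit();
if (RT_SUCCESS(rc))
{
    rc = SubBInit();
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;    /* everything is up */
    SubATerm();                 /* SubB failed: undo SubA only */
}
return rc;                      /* propagate the first failure */
#endif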
310
311
312/**
313 * Terminate the module.
314 * This is called when we're finally unloaded.
315 *
316 * @param hMod Image handle for use in APIs.
317 */
318DECLEXPORT(void) ModuleTerm(void *hMod)
319{
320 NOREF(hMod);
321 LogFlow(("ModuleTerm:\n"));
322
323 /*
324 * Terminate the CPUM module (Local APIC cleanup).
325 */
326 CPUMR0ModuleTerm();
327
328 /*
329 * Terminate the internal network service.
330 */
331 IntNetR0Term();
332
333 /*
334 * PGM (Darwin), HM and PciRaw global cleanup.
335 */
336#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
337 PGMR0DynMapTerm();
338#endif
339#ifdef VBOX_WITH_PCI_PASSTHROUGH
340 PciRawR0Term();
341#endif
342 PGMDeregisterStringFormatTypes();
343 HMR0Term();
344#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
345 vmmR0TripleFaultHackTerm();
346#endif
347
348 /*
349 * Destroy the GMM and GVMM instances.
350 */
351 GMMR0Term();
352 GVMMR0Term();
353
354 vmmTermFormatTypes();
355
356 LogFlow(("ModuleTerm: returns\n"));
357}
358
359
360/**
361 * Initializes the R0 bits for a particular VM instance.
362 *
363 * @returns VBox status code.
364 *
365 * @param pGVM The global (ring-0) VM structure.
366 * @param uSvnRev The SVN revision of the ring-3 part.
367 * @param uBuildType Build type indicator.
368 * @thread EMT(0)
369 */
370static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
371{
372 VMM_CHECK_SMAP_SETUP();
373 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
374
375 /*
376 * Match the SVN revisions and build type.
377 */
378 if (uSvnRev != VMMGetSvnRev())
379 {
380 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
381 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
382 return VERR_VMM_R0_VERSION_MISMATCH;
383 }
384 if (uBuildType != vmmGetBuildType())
385 {
386 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
387 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
388 return VERR_VMM_R0_VERSION_MISMATCH;
389 }
390
391 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
392 if (RT_FAILURE(rc))
393 return rc;
394
395#ifdef LOG_ENABLED
396 /*
397 * Register the EMT R0 logger instance for VCPU 0.
398 */
399 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
400
401 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
402 if (pR0Logger)
403 {
404# if 0 /* testing of the logger. */
405 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
406 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
407 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
408 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
409
410 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
411 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
412 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
413 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
414
415 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
416 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
417 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
418 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
419
420 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
421 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
422 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
423 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
424 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
425 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
426
427 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
428 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
429
430 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
431 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
432 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
433# endif
434 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pGVM->pSession));
435 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
436 pR0Logger->fRegistered = true;
437 }
438#endif /* LOG_ENABLED */
439
440 /*
441 * Check whether the host supports high-resolution timers.
442 */
443 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
444 && !RTTimerCanDoHighResolution())
445 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
446
447 /*
448 * Initialize the per VM data for GVMM and GMM.
449 */
450 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
451 rc = GVMMR0InitVM(pGVM);
452 if (RT_SUCCESS(rc))
453 {
454 /*
455 * Init HM, CPUM and PGM (Darwin only).
456 */
457 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
458 rc = HMR0InitVM(pGVM);
459 if (RT_SUCCESS(rc))
460 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */
461 if (RT_SUCCESS(rc))
462 {
463 rc = CPUMR0InitVM(pGVM);
464 if (RT_SUCCESS(rc))
465 {
466 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
467 rc = PGMR0InitVM(pGVM);
468 if (RT_SUCCESS(rc))
469 {
470 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
471 rc = EMR0InitVM(pGVM);
472 if (RT_SUCCESS(rc))
473 {
474 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
475#ifdef VBOX_WITH_PCI_PASSTHROUGH
476 rc = PciRawR0InitVM(pGVM);
477#endif
478 if (RT_SUCCESS(rc))
479 {
480 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
481 rc = GIMR0InitVM(pGVM);
482 if (RT_SUCCESS(rc))
483 {
484 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);
485 if (RT_SUCCESS(rc))
486 {
487 GVMMR0DoneInitVM(pGVM);
488
489 /*
490 * Collect a bit of info for the VM release log.
491 */
492 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
493 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
494
495 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
496 return rc;
497 }
498
499 /* Bail out. */
500 GIMR0TermVM(pGVM);
501 }
502#ifdef VBOX_WITH_PCI_PASSTHROUGH
503 PciRawR0TermVM(pGVM);
504#endif
505 }
506 }
507 }
508 }
509 HMR0TermVM(pGVM);
510 }
511 }
512
513 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
514 return rc;
515}
516
517
518/**
519 * Does EMT-specific VM initialization.
520 *
521 * @returns VBox status code.
522 * @param pGVM The ring-0 VM structure.
523 * @param idCpu The EMT that's calling.
524 */
525static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
526{
527 /* Paranoia (caller checked these already). */
528 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
529 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
530
531#ifdef LOG_ENABLED
532 /*
533 * Registration of ring-0 loggers.
534 */
535 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
536 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
537 if ( pR0Logger
538 && !pR0Logger->fRegistered)
539 {
540 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
541 pR0Logger->fRegistered = true;
542 }
543#endif
544
545 return VINF_SUCCESS;
546}
547
548
549
550/**
551 * Terminates the R0 bits for a particular VM instance.
552 *
553 * This is normally called by ring-3 as part of the VM termination process, but
554 * may alternatively be called during the support driver session cleanup when
555 * the VM object is destroyed (see GVMM).
556 *
557 * @returns VBox status code.
558 *
559 * @param pGVM The global (ring-0) VM structure.
560 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
561 * thread.
562 * @thread EMT(0) or session clean up thread.
563 */
564VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
565{
566 /*
567 * Check EMT(0) claim if we're called from userland.
568 */
569 if (idCpu != NIL_VMCPUID)
570 {
571 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
572 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
573 if (RT_FAILURE(rc))
574 return rc;
575 }
576
577#ifdef VBOX_WITH_PCI_PASSTHROUGH
578 PciRawR0TermVM(pGVM);
579#endif
580
581 /*
582 * Tell GVMM what we're up to and check that we only do this once.
583 */
584 if (GVMMR0DoingTermVM(pGVM))
585 {
586 GIMR0TermVM(pGVM);
587
588 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
589 * here to make sure we don't leak any shared pages if we crash... */
590#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
591 PGMR0DynMapTermVM(pGVM);
592#endif
593 HMR0TermVM(pGVM);
594 }
595
596 /*
597 * Deregister the logger.
598 */
599 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
600 return VINF_SUCCESS;
601}
602
603
604/**
605 * An interrupt or unhalt force flag is set; deal with it.
606 *
607 * @returns VINF_SUCCESS (or VINF_EM_HALT).
608 * @param pVCpu The cross context virtual CPU structure.
609 * @param uMWait Result from EMMonitorWaitIsActive().
610 * @param enmInterruptibility Guest CPU interruptibility level.
611 */
612static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
613{
614 Assert(!TRPMHasTrap(pVCpu));
615 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
616 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
617
618 /*
619 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
620 */
621 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
622 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
623 {
624 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
625 {
626 uint8_t u8Interrupt = 0;
627 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
628 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
629 if (RT_SUCCESS(rc))
630 {
631 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
632
633 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
634 AssertRCSuccess(rc);
635 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
636 return rc;
637 }
638 }
639 }
640 /*
641 * SMI is not implemented yet, at least not here.
642 */
643 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
644 {
645 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #3\n", pVCpu->idCpu));
646 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
647 return VINF_EM_HALT;
648 }
649 /*
650 * NMI.
651 */
652 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
653 {
654 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
655 {
656 /** @todo later. */
657 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #2 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
658 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
659 return VINF_EM_HALT;
660 }
661 }
662 /*
663 * Nested-guest virtual interrupt.
664 */
665 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
666 {
667 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
668 {
669 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
670 * here before injecting the virtual interrupt. See emR3ForcedActions
671 * for details. */
672 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #1 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
673 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
674 return VINF_EM_HALT;
675 }
676 }
677
678 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
679 {
680 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
681 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (UNHALT)\n", pVCpu->idCpu));
682 return VINF_SUCCESS;
683 }
684 if (uMWait > 1)
685 {
686 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
687 Log11(("vmmR0DoHaltInterrupt: CPU%d success VINF_SUCCESS (uMWait=%u > 1)\n", pVCpu->idCpu, uMWait));
688 return VINF_SUCCESS;
689 }
690
691 Log12(("vmmR0DoHaltInterrupt: CPU%d failed #0 (uMWait=%u enmInt=%d)\n", pVCpu->idCpu, uMWait, enmInterruptibility));
692 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3);
693 return VINF_EM_HALT;
694}
695
696
697/**
698 * This does one round of vmR3HaltGlobal1Halt().
699 *
700 * The rationale here is that we'll reduce latency in interrupt situations if we
701 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
702 * MWAIT), but do one round of blocking here instead and hope the interrupt is
703 * raised in the meantime.
704 *
705 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
706 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
707 * ring-0 call (unless we're too close to a timer event). When the interrupt
708 * wakes us up, we'll return from ring-0 and EM will by instinct do a
709 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
710 * back to VMMR0EntryFast().
711 *
712 * @returns VINF_SUCCESS or VINF_EM_HALT.
713 * @param pGVM The ring-0 VM structure.
714 * @param pGVCpu The ring-0 virtual CPU structure.
715 *
716 * @todo r=bird: All the blocking/waiting and EMT management should move out of
717 * the VM module, probably to VMM. Then this would be more weird wrt
718 * parameters and statistics.
719 */
720static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
721{
722 /*
723 * Do spin stat historization.
724 */
725 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
726 { /* likely */ }
727 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
728 {
729 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
730 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
731 }
732 else
733 {
734 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
735 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
736 }
737
738 /*
739 * Flags that make us go to ring-3.
740 */
741 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
742 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
743 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
744 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
745 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
746 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
747 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
748 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
749
750 /*
751 * Check preconditions.
752 */
753 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
754 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
755 if ( pGVCpu->vmm.s.fMayHaltInRing0
756 && !TRPMHasTrap(pGVCpu)
757 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
758 || uMWait > 1))
759 {
760 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
761 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
762 {
763 /*
764 * Interrupts pending already?
765 */
766 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
767 APICUpdatePendingInterrupts(pGVCpu);
768
769 /*
770 * Flags that wake us up from the halted state.
771 */
772 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
773 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
774
775 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
776 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
777 ASMNopPause();
778
779 /*
780 * Check out how long till the next timer event.
781 */
782 uint64_t u64Delta;
783 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
784
785 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
786 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
787 {
788 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
789 APICUpdatePendingInterrupts(pGVCpu);
790
791 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
792 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
793
794 /*
795 * Wait if there is enough time until the next timer event.
796 */
797 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
798 {
799 /* If there are a few other CPU cores around, we will procrastinate a
800 little before going to sleep, hoping for some device to raise an
801 interrupt or similar. Though, the best thing here would be to
802 dynamically adjust the spin count according to its usefulness or
803 something... */
804 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
805 && RTMpGetOnlineCount() >= 4)
806 {
807 /** @todo Figure out how we can skip this if it hasn't helped recently...
808 * @bugref{9172#c12} */
809 uint32_t cSpinLoops = 42;
810 while (cSpinLoops-- > 0)
811 {
812 ASMNopPause();
813 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
814 APICUpdatePendingInterrupts(pGVCpu);
815 ASMNopPause();
816 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
817 {
818 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
819 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
820 return VINF_EM_HALT;
821 }
822 ASMNopPause();
823 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
824 {
825 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
826 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
827 return VINF_EM_HALT;
828 }
829 ASMNopPause();
830 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
831 {
832 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
833 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
834 }
835 ASMNopPause();
836 }
837 }
838
839 /*
840 * We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
841 * knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here).
842 * After changing the state we must recheck the force flags of course.
843 */
844 if (VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
845 {
846 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
847 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
848 {
849 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
850 APICUpdatePendingInterrupts(pGVCpu);
851
852 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
853 {
854 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
855 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
856 }
857
858 /* Okay, block! */
859 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
860 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
861 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
862 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
863 Log10(("vmmR0DoHalt: CPU%d: halted %llu ns\n", pGVCpu->idCpu, cNsElapsedSchedHalt));
864
865 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
866 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
867 if ( rc == VINF_SUCCESS
868 || rc == VERR_INTERRUPTED)
869 {
870 /* Keep some stats like ring-3 does. */
871 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
872 if (cNsOverslept > 50000)
873 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
874 else if (cNsOverslept < -50000)
875 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
876 else
877 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
878
879 /*
880 * Recheck whether we can resume execution or have to go to ring-3.
881 */
882 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
883 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
884 {
885 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
886 APICUpdatePendingInterrupts(pGVCpu);
887 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
888 {
889 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
890 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
891 }
892 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostNoInt);
893 Log12(("vmmR0DoHalt: CPU%d post #2 - No pending interrupt\n", pGVCpu->idCpu));
894 }
895 else
896 {
897 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PostPendingFF);
898 Log12(("vmmR0DoHalt: CPU%d post #1 - Pending FF\n", pGVCpu->idCpu));
899 }
900 }
901 else
902 {
903 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
904 Log12(("vmmR0DoHalt: CPU%d GVMMR0SchedHalt failed: %Rrc\n", pGVCpu->idCpu, rc));
905 }
906 }
907 else
908 {
909 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
910 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
911 Log12(("vmmR0DoHalt: CPU%d failed #5 - Pending FF\n", pGVCpu->idCpu));
912 }
913 }
914 else
915 {
916 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
917 Log12(("vmmR0DoHalt: CPU%d failed #4 - enmState=%d\n", pGVCpu->idCpu, VMCPU_GET_STATE(pGVCpu)));
918 }
919 }
920 else
921 {
922 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3SmallDelta);
923 Log12(("vmmR0DoHalt: CPU%d failed #3 - delta too small: %RU64\n", pGVCpu->idCpu, u64Delta));
924 }
925 }
926 else
927 {
928 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
929 Log12(("vmmR0DoHalt: CPU%d failed #2 - Pending FF\n", pGVCpu->idCpu));
930 }
931 }
932 else
933 {
934 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3PendingFF);
935 Log12(("vmmR0DoHalt: CPU%d failed #1 - Pending FF\n", pGVCpu->idCpu));
936 }
937 }
938 else
939 {
940 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3Other);
941 Log12(("vmmR0DoHalt: CPU%d failed #0 - fMayHaltInRing0=%d TRPMHasTrap=%d enmInt=%d uMWait=%u\n",
942 pGVCpu->idCpu, pGVCpu->vmm.s.fMayHaltInRing0, TRPMHasTrap(pGVCpu), enmInterruptibility, uMWait));
943 }
944
945 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3);
946 return VINF_EM_HALT;
947}
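/*
 * Illustrative sketch: the spin stat historization at the top of vmmR0DoHalt
 * keeps the success/to-ring-3 counters bounded so that recent behaviour
 * dominates.  Roughly every 256th halt the counters are reset 2:0 in favour
 * of whichever outcome has been winning (hypothetical local names):
 */
#if 0 /* example only */
if (++cHalts & 0xff)                /* do nothing on 255 of 256 halts */
{ /* likely */ }
else if (cSucceeded > cToRing3)     /* halting in ring-0 has been paying off */
{
    cSucceeded = 2;
    cToRing3   = 0;
}
else                                /* we mostly end up in ring-3 anyway */
{
    cSucceeded = 0;
    cToRing3   = 2;
}
#endif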
948
949
950/**
951 * VMM ring-0 thread-context callback.
952 *
953 * This does common HM state updating and calls the HM-specific thread-context
954 * callback.
955 *
956 * @param enmEvent The thread-context event.
957 * @param pvUser Opaque pointer to the VMCPU.
958 *
959 * @thread EMT(pvUser)
960 */
961static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
962{
963 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
964
965 switch (enmEvent)
966 {
967 case RTTHREADCTXEVENT_IN:
968 {
969 /*
970 * Linux may call us with preemption enabled (really!) but technically we
971 * cannot get preempted here; otherwise we end up in an infinite recursion
972 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
973 * ad infinitum). Let's just disable preemption for now...
974 */
975 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
976 * preemption after doing the callout (one or two functions up the
977 * call chain). */
978 /** @todo r=ramshankar: See @bugref{5313#c30}. */
979 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
980 RTThreadPreemptDisable(&ParanoidPreemptState);
981
982 /* We need to update the VCPU <-> host CPU mapping. */
983 RTCPUID idHostCpu;
984 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
985 pVCpu->iHostCpuSet = iHostCpuSet;
986 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
987
988 /* In the very unlikely event that the GIP delta for the CPU we're
989 rescheduled on needs calculating, try to force a return to ring-3.
990 We unfortunately cannot do the measurements right here. */
991 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
992 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
993
994 /* Invoke the HM-specific thread-context callback. */
995 HMR0ThreadCtxCallback(enmEvent, pvUser);
996
997 /* Restore preemption. */
998 RTThreadPreemptRestore(&ParanoidPreemptState);
999 break;
1000 }
1001
1002 case RTTHREADCTXEVENT_OUT:
1003 {
1004 /* Invoke the HM-specific thread-context callback. */
1005 HMR0ThreadCtxCallback(enmEvent, pvUser);
1006
1007 /*
1008 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
1009 * have the same host CPU associated with them.
1010 */
1011 pVCpu->iHostCpuSet = UINT32_MAX;
1012 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1013 break;
1014 }
1015
1016 default:
1017 /* Invoke the HM-specific thread-context callback. */
1018 HMR0ThreadCtxCallback(enmEvent, pvUser);
1019 break;
1020 }
1021}
1022
1023
1024/**
1025 * Creates a thread switching hook for the current EMT thread.
1026 *
1027 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
1028 * platform does not implement switcher hooks, no hook will be created and the
1029 * member is set to NIL_RTTHREADCTXHOOK.
1030 *
1031 * @returns VBox status code.
1032 * @param pVCpu The cross context virtual CPU structure.
1033 * @thread EMT(pVCpu)
1034 */
1035VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
1036{
1037 VMCPU_ASSERT_EMT(pVCpu);
1038 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
1039
1040#if 1 /* To disable this stuff change to zero. */
1041 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
1042 if (RT_SUCCESS(rc))
1043 return rc;
1044#else
1045 RT_NOREF(vmmR0ThreadCtxCallback);
1046 int rc = VERR_NOT_SUPPORTED;
1047#endif
1048
1049 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1050 if (rc == VERR_NOT_SUPPORTED)
1051 return VINF_SUCCESS;
1052
1053 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
1054 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
1055}
1056
1057
1058/**
1059 * Destroys the thread switching hook for the specified VCPU.
1060 *
1061 * @param pVCpu The cross context virtual CPU structure.
1062 * @remarks Can be called from any thread.
1063 */
1064VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
1065{
1066 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
1067 AssertRC(rc);
1068 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1069}
1070
1071
1072/**
1073 * Disables the thread switching hook for this VCPU (if we got one).
1074 *
1075 * @param pVCpu The cross context virtual CPU structure.
1076 * @thread EMT(pVCpu)
1077 *
1078 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
1079 * this call. This means you have to be careful with what you do!
1080 */
1081VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1082{
1083 /*
1084 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1085 * @bugref{7726#c19} explains the need for this trick:
1086 *
1087 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1088 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disable context hooks during
1089 * longjmp & normal return to ring-3, which opens a window where we may be
1090 * rescheduled without changing VMCPUID::idHostCpu and cause confusion if
1091 * the CPU starts executing a different EMT. Both functions first disable
1092 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1093 * an opening for getting preempted.
1094 */
1095 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1096 * all the time. */
1097 /** @todo move this into the context hook disabling if(). */
1098 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1099
1100 /*
1101 * Disable the context hook, if we got one.
1102 */
1103 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1104 {
1105 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1106 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1107 AssertRC(rc);
1108 }
1109}
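/*
 * Illustrative sketch: the per-EMT lifecycle of the context hook managed by
 * the functions above (error handling omitted):
 */
#if 0 /* example only */
RTTHREADCTXHOOK hHook = NIL_RTTHREADCTXHOOK;
RTThreadCtxHookCreate(&hHook, 0, vmmR0ThreadCtxCallback, pVCpu); /* on the EMT itself */

RTThreadCtxHookEnable(hHook);   /* right before entering HM context */
/* ... guest execution; the callback fires on preemption and resumption ... */
RTThreadCtxHookDisable(hHook);  /* before returning to ring-3 */

RTThreadCtxHookDestroy(hHook);  /* at teardown; may be done from any thread */
#endif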
1110
1111
1112/**
1113 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
1114 *
1115 * @returns true if registered, false otherwise.
1116 * @param pVCpu The cross context virtual CPU structure.
1117 */
1118DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1119{
1120 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
1121}
1122
1123
1124/**
1125 * Whether thread-context hooks are registered for this VCPU.
1126 *
1127 * @returns true if registered, false otherwise.
1128 * @param pVCpu The cross context virtual CPU structure.
1129 */
1130VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1131{
1132 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1133}
1134
1135
1136/**
1137 * Returns the ring-0 release logger instance.
1138 *
1139 * @returns Pointer to release logger, NULL if not configured.
1140 * @param pVCpu The cross context virtual CPU structure of the caller.
1141 * @thread EMT(pVCpu)
1142 */
1143VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu)
1144{
1145 PVMMR0LOGGER pLogger = pVCpu->vmm.s.pR0RelLoggerR0;
1146 if (pLogger)
1147 return &pLogger->Logger;
1148 return NULL;
1149}
1150
1151
1152#ifdef VBOX_WITH_STATISTICS
1153/**
1154 * Records return code statistics.
1155 * @param pVM The cross context VM structure.
1156 * @param pVCpu The cross context virtual CPU structure.
1157 * @param rc The status code.
1158 */
1159static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1160{
1161 /*
1162 * Collect statistics.
1163 */
1164 switch (rc)
1165 {
1166 case VINF_SUCCESS:
1167 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1168 break;
1169 case VINF_EM_RAW_INTERRUPT:
1170 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1171 break;
1172 case VINF_EM_RAW_INTERRUPT_HYPER:
1173 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1174 break;
1175 case VINF_EM_RAW_GUEST_TRAP:
1176 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1177 break;
1178 case VINF_EM_RAW_RING_SWITCH:
1179 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1180 break;
1181 case VINF_EM_RAW_RING_SWITCH_INT:
1182 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1183 break;
1184 case VINF_EM_RAW_STALE_SELECTOR:
1185 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1186 break;
1187 case VINF_EM_RAW_IRET_TRAP:
1188 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1189 break;
1190 case VINF_IOM_R3_IOPORT_READ:
1191 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1192 break;
1193 case VINF_IOM_R3_IOPORT_WRITE:
1194 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1195 break;
1196 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1197 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1198 break;
1199 case VINF_IOM_R3_MMIO_READ:
1200 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1201 break;
1202 case VINF_IOM_R3_MMIO_WRITE:
1203 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1204 break;
1205 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1206 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1207 break;
1208 case VINF_IOM_R3_MMIO_READ_WRITE:
1209 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1210 break;
1211 case VINF_PATM_HC_MMIO_PATCH_READ:
1212 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1213 break;
1214 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1215 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1216 break;
1217 case VINF_CPUM_R3_MSR_READ:
1218 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1219 break;
1220 case VINF_CPUM_R3_MSR_WRITE:
1221 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1222 break;
1223 case VINF_EM_RAW_EMULATE_INSTR:
1224 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1225 break;
1226 case VINF_PATCH_EMULATE_INSTR:
1227 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1228 break;
1229 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1230 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1231 break;
1232 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1233 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1234 break;
1235 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1236 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1237 break;
1238 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1239 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1240 break;
1241 case VINF_CSAM_PENDING_ACTION:
1242 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1243 break;
1244 case VINF_PGM_SYNC_CR3:
1245 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1246 break;
1247 case VINF_PATM_PATCH_INT3:
1248 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1249 break;
1250 case VINF_PATM_PATCH_TRAP_PF:
1251 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1252 break;
1253 case VINF_PATM_PATCH_TRAP_GP:
1254 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1255 break;
1256 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1257 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1258 break;
1259 case VINF_EM_RESCHEDULE_REM:
1260 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1261 break;
1262 case VINF_EM_RAW_TO_R3:
1263 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1264 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1265 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1266 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1267 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1268 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1269 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1270 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1271 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1272 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1273 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1274 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1275 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1276 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1277 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1278 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1279 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1280 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1281 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1282 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1283 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1284 else
1285 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1286 break;
1287
1288 case VINF_EM_RAW_TIMER_PENDING:
1289 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1290 break;
1291 case VINF_EM_RAW_INTERRUPT_PENDING:
1292 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1293 break;
1294 case VINF_VMM_CALL_HOST:
1295 switch (pVCpu->vmm.s.enmCallRing3Operation)
1296 {
1297 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1298 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1299 break;
1300 case VMMCALLRING3_PDM_LOCK:
1301 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1302 break;
1303 case VMMCALLRING3_PGM_POOL_GROW:
1304 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1305 break;
1306 case VMMCALLRING3_PGM_LOCK:
1307 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1308 break;
1309 case VMMCALLRING3_PGM_MAP_CHUNK:
1310 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1311 break;
1312 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1313 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1314 break;
1315 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1316 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1317 break;
1318 case VMMCALLRING3_VM_SET_ERROR:
1319 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1320 break;
1321 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1322 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1323 break;
1324 case VMMCALLRING3_VM_R0_ASSERTION:
1325 default:
1326 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1327 break;
1328 }
1329 break;
1330 case VINF_PATM_DUPLICATE_FUNCTION:
1331 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1332 break;
1333 case VINF_PGM_CHANGE_MODE:
1334 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1335 break;
1336 case VINF_PGM_POOL_FLUSH_PENDING:
1337 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1338 break;
1339 case VINF_EM_PENDING_REQUEST:
1340 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1341 break;
1342 case VINF_EM_HM_PATCH_TPR_INSTR:
1343 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1344 break;
1345 default:
1346 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1347 break;
1348 }
1349}
1350#endif /* VBOX_WITH_STATISTICS */
1351
1352
1353/**
1354 * The Ring 0 entry point, called by the fast-ioctl path.
1355 *
1356 * @param pGVM The global (ring-0) VM structure.
1357 * @param pVMIgnored The cross context VM structure. The return code is
1358 * stored in pVM->vmm.s.iLastGZRc.
1359 * @param idCpu The Virtual CPU ID of the calling EMT.
1360 * @param enmOperation Which operation to execute.
1361 * @remarks Assume called with interrupts _enabled_.
1362 */
1363VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1364{
1365 RT_NOREF(pVMIgnored);
1366
1367 /*
1368 * Validation.
1369 */
1370 if ( idCpu < pGVM->cCpus
1371 && pGVM->cCpus == pGVM->cCpusUnsafe)
1372 { /*likely*/ }
1373 else
1374 {
1375 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1376 return;
1377 }
1378
1379 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1380 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1381 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1382 && pGVCpu->hNativeThreadR0 == hNativeThread))
1383 { /* likely */ }
1384 else
1385 {
1386 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1387 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1388 return;
1389 }
1390
1391 /*
1392 * SMAP fun.
1393 */
1394 VMM_CHECK_SMAP_SETUP();
1395 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1396
1397 /*
1398 * Perform requested operation.
1399 */
1400 switch (enmOperation)
1401 {
1402 /*
1403 * Run guest code using the available hardware acceleration technology.
1404 */
1405 case VMMR0_DO_HM_RUN:
1406 {
1407 for (;;) /* hlt loop */
1408 {
1409 /*
1410 * Disable preemption.
1411 */
1412 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1413 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1414 RTThreadPreemptDisable(&PreemptState);
1415
1416 /*
1417 * Get the host CPU identifiers, make sure they are valid and that
1418 * we've got a TSC delta for the CPU.
1419 */
1420 RTCPUID idHostCpu;
1421 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1422 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1423 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1424 {
1425 pGVCpu->iHostCpuSet = iHostCpuSet;
1426 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1427
1428 /*
1429 * Update the periodic preemption timer if it's active.
1430 */
1431 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1432 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1433 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1434
1435#ifdef VMM_R0_TOUCH_FPU
1436 /*
1437 * Make sure we've got the FPU state loaded so we don't need to clear
1438 * CR0.TS and get out of sync with the host kernel when loading the guest
1439 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1440 */
1441 CPUMR0TouchHostFpu();
1442#endif
1443 int rc;
1444 bool fPreemptRestored = false;
1445 if (!HMR0SuspendPending())
1446 {
1447 /*
1448 * Enable the context switching hook.
1449 */
1450 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1451 {
1452 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmm.s.hCtxHook));
1453 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1454 }
1455
1456 /*
1457 * Enter HM context.
1458 */
1459 rc = HMR0Enter(pGVCpu);
1460 if (RT_SUCCESS(rc))
1461 {
1462 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1463
1464 /*
1465 * When preemption hooks are in place, enable preemption now that
1466 * we're in HM context.
1467 */
1468 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1469 {
1470 fPreemptRestored = true;
1471 RTThreadPreemptRestore(&PreemptState);
1472 }
1473
1474 /*
1475 * Set up the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1476 */
1477 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1478 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
1479 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1480
1481 /*
1482 * Assert sanity on the way out. Using manual assertion code here as normal
1483 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1484 */
1485 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1486 && RT_SUCCESS_NP(rc)
1487 && rc != VINF_VMM_CALL_HOST ))
1488 {
1489 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1490 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1491 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1492 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1493 }
1494 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1495 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1496 {
1497 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1498 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1499 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1500 rc = VERR_VMM_CONTEXT_HOOK_STILL_ENABLED;
1501 }
1502
1503 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1504 }
1505 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1506
1507 /*
1508 * Invalidate the host CPU identifiers before we disable the context
1509 * hook / restore preemption.
1510 */
1511 pGVCpu->iHostCpuSet = UINT32_MAX;
1512 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1513
1514 /*
1515 * Disable context hooks. Due to unresolved cleanup issues, we
1516 * cannot leave the hooks enabled when we return to ring-3.
1517 *
1518 * Note! At the moment HM may also have disabled the hook
1519 * when we get here, but the IPRT API handles that.
1520 */
1521 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1522 {
1523 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1524 RTThreadCtxHookDisable(pGVCpu->vmm.s.hCtxHook);
1525 }
1526 }
1527 /*
1528 * The system is about to go into suspend mode; go back to ring 3.
1529 */
1530 else
1531 {
1532 rc = VINF_EM_RAW_INTERRUPT;
1533 pGVCpu->iHostCpuSet = UINT32_MAX;
1534 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1535 }
1536
1537 /** @todo When HM stops messing with the context hook state, we'll disable
1538 * preemption again before the RTThreadCtxHookDisable call. */
1539 if (!fPreemptRestored)
1540 RTThreadPreemptRestore(&PreemptState);
1541
1542 pGVCpu->vmm.s.iLastGZRc = rc;
1543
1544 /* Fire dtrace probe and collect statistics. */
1545 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1546#ifdef VBOX_WITH_STATISTICS
1547 vmmR0RecordRC(pGVM, pGVCpu, rc);
1548#endif
1549 /*
1550 * If this is a halt, see whether we can handle it here in ring-0.
1551 */
1552 if (rc != VINF_EM_HALT)
1553 { /* we're not in a hurry for a HLT, so prefer this path */ }
1554 else
1555 {
1556 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1557 if (rc == VINF_SUCCESS)
1558 {
1559 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1560 continue;
1561 }
1562 pGVCpu->vmm.s.cR0HaltsToRing3++;
1563 }
1564 }
1565 /*
1566 * Invalid CPU set index or TSC delta in need of measuring.
1567 */
1568 else
1569 {
1570 pGVCpu->iHostCpuSet = UINT32_MAX;
1571 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1572 RTThreadPreemptRestore(&PreemptState);
1573 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1574 {
1575 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1576 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1577 0 /*default cTries*/);
1578 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1579 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1580 else
1581 pGVCpu->vmm.s.iLastGZRc = rc;
1582 }
1583 else
1584 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1585 }
1586 break;
1587
1588 } /* halt loop. */
1589 break;
1590 }
1591
1592#ifdef VBOX_WITH_NEM_R0
1593# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1594 case VMMR0_DO_NEM_RUN:
1595 {
1596 /*
1597 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1598 */
1599 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1600# ifdef VBOXSTRICTRC_STRICT_ENABLED
1601 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1602# else
1603 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1604# endif
1605 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1606 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1607
1608 pGVCpu->vmm.s.iLastGZRc = rc;
1609
1610 /*
1611 * Fire dtrace probe and collect statistics.
1612 */
1613 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1614# ifdef VBOX_WITH_STATISTICS
1615 vmmR0RecordRC(pGVM, pGVCpu, rc);
1616# endif
1617 break;
1618 }
1619# endif
1620#endif
1621
1622 /*
1623 * For profiling.
1624 */
1625 case VMMR0_DO_NOP:
1626 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1627 break;
1628
1629 /*
1630 * Shouldn't happen.
1631 */
1632 default:
1633 AssertMsgFailed(("%#x\n", enmOperation));
1634 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1635 break;
1636 }
1637 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1638}
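/*
 * Illustrative sketch: stripped of statistics, halt handling and error paths,
 * the VMMR0_DO_HM_RUN case above reduces to this skeleton:
 */
#if 0 /* example only */
RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
RTThreadPreemptDisable(&PreemptState);              /* pin the EMT to the current CPU */

RTCPUID  idHostCpu;
uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
pGVCpu->iHostCpuSet  = iHostCpuSet;                 /* publish the EMT <-> host CPU mapping */
ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);

int rc = HMR0Enter(pGVCpu);
if (RT_SUCCESS(rc))
    rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);

pGVCpu->iHostCpuSet = UINT32_MAX;                   /* invalidate the mapping again... */
ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
RTThreadPreemptRestore(&PreemptState);              /* ...before we can be rescheduled */
pGVCpu->vmm.s.iLastGZRc = rc;                       /* the result travels back via the VMCPU */
#endif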
1639
1640
1641/**
1642 * Validates a session or VM session argument.
1643 *
1644 * @returns true / false accordingly.
1645 * @param pGVM The global (ring-0) VM structure.
1646 * @param pClaimedSession The session claim to validate.
1647 * @param pSession The session argument.
1648 */
1649DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1650{
1651 /* This must be set! */
1652 if (!pSession)
1653 return false;
1654
1655 /* Only one of the two may be set. */
1656 if (pGVM && pClaimedSession)
1657 return false;
1658 if (pGVM)
1659 pClaimedSession = pGVM->pSession;
1660 return pClaimedSession == pSession;
1661}
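/*
 * Illustrative summary of the four cases vmmR0IsValidSession() distinguishes,
 * with S denoting the caller's session (pSession):
 *
 *      pGVM    pClaimedSession     result
 *      NULL    NULL                false (nothing to match S against)
 *      NULL    X                   X == S
 *      set     NULL                pGVM->pSession == S
 *      set     X                   false (at most one claim is allowed)
 */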
1662
1663
1664/**
1665 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1666 * called thru a longjmp so we can exit safely on failure.
1667 *
1668 * @returns VBox status code.
1669 * @param pGVM The global (ring-0) VM structure.
1670 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1671 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1672 * @param enmOperation Which operation to execute.
1673 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1674 * The support driver validates this if it's present.
1675 * @param u64Arg Some simple constant argument.
1676 * @param pSession The session of the caller.
1677 *
1678 * @remarks Assume called with interrupts _enabled_.
1679 */
1680static int vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1681 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1682{
1683 /*
1684 * Validate pGVM and idCpu for consistency and validity.
1685 */
1686 if (pGVM != NULL)
1687 {
1688 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
1689 { /* likely */ }
1690 else
1691 {
1692 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1693 return VERR_INVALID_POINTER;
1694 }
1695
1696 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1697 { /* likely */ }
1698 else
1699 {
1700 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1701 return VERR_INVALID_PARAMETER;
1702 }
1703
1704 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1705 && pGVM->enmVMState <= VMSTATE_TERMINATED
1706 && pGVM->pSession == pSession
1707 && pGVM->pSelf == pGVM))
1708 { /* likely */ }
1709 else
1710 {
1711 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1712 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1713 return VERR_INVALID_POINTER;
1714 }
1715 }
1716 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1717 { /* likely */ }
1718 else
1719 {
1720 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1721 return VERR_INVALID_PARAMETER;
1722 }
1723
1724 /*
1725 * SMAP fun.
1726 */
1727 VMM_CHECK_SMAP_SETUP();
1728 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1729
1730 /*
1731 * Process the request.
1732 */
1733 int rc;
1734 switch (enmOperation)
1735 {
1736 /*
1737 * GVM requests
1738 */
1739 case VMMR0_DO_GVMM_CREATE_VM:
1740 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1741 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1742 else
1743 rc = VERR_INVALID_PARAMETER;
1744 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1745 break;
1746
1747 case VMMR0_DO_GVMM_DESTROY_VM:
1748 if (pReqHdr == NULL && u64Arg == 0)
1749 rc = GVMMR0DestroyVM(pGVM);
1750 else
1751 rc = VERR_INVALID_PARAMETER;
1752 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1753 break;
1754
1755 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1756 if (pGVM != NULL)
1757 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1758 else
1759 rc = VERR_INVALID_PARAMETER;
1760 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1761 break;
1762
1763 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1764 if (pGVM != NULL)
1765 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1766 else
1767 rc = VERR_INVALID_PARAMETER;
1768 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1769 break;
1770
1771 case VMMR0_DO_GVMM_SCHED_HALT:
1772 if (pReqHdr)
1773 return VERR_INVALID_PARAMETER;
1774 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1775 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1776 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1777 break;
1778
1779 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1780 if (pReqHdr || u64Arg)
1781 return VERR_INVALID_PARAMETER;
1782 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1783 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1784 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1785 break;
1786
1787 case VMMR0_DO_GVMM_SCHED_POKE:
1788 if (pReqHdr || u64Arg)
1789 return VERR_INVALID_PARAMETER;
1790 rc = GVMMR0SchedPoke(pGVM, idCpu);
1791 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1792 break;
1793
1794 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1795 if (u64Arg)
1796 return VERR_INVALID_PARAMETER;
1797 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1798 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1799 break;
1800
1801 case VMMR0_DO_GVMM_SCHED_POLL:
1802 if (pReqHdr || u64Arg > 1)
1803 return VERR_INVALID_PARAMETER;
1804 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1805 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1806 break;
1807
1808 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1809 if (u64Arg)
1810 return VERR_INVALID_PARAMETER;
1811 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1812 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1813 break;
1814
1815 case VMMR0_DO_GVMM_RESET_STATISTICS:
1816 if (u64Arg)
1817 return VERR_INVALID_PARAMETER;
1818 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1819 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1820 break;
1821
1822 /*
1823 * Initialize the R0 part of a VM instance.
1824 */
1825 case VMMR0_DO_VMMR0_INIT:
1826 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1827 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1828 break;
1829
1830 /*
1831 * Does EMT-specific ring-0 init.
1832 */
1833 case VMMR0_DO_VMMR0_INIT_EMT:
1834 rc = vmmR0InitVMEmt(pGVM, idCpu);
1835 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1836 break;
1837
1838 /*
1839 * Terminate the R0 part of a VM instance.
1840 */
1841 case VMMR0_DO_VMMR0_TERM:
1842 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1843 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1844 break;
1845
1846 /*
1847 * Attempt to enable HM mode and check the current setting.
1848 */
1849 case VMMR0_DO_HM_ENABLE:
1850 rc = HMR0EnableAllCpus(pGVM);
1851 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1852 break;
1853
1854 /*
1855 * Set up the hardware-accelerated session.
1856 */
1857 case VMMR0_DO_HM_SETUP_VM:
1858 rc = HMR0SetupVM(pGVM);
1859 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1860 break;
1861
1862 /*
1863 * PGM wrappers.
1864 */
1865 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1866 if (idCpu == NIL_VMCPUID)
1867 return VERR_INVALID_CPU_ID;
1868 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
1869 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1870 break;
1871
1872 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1873 if (idCpu == NIL_VMCPUID)
1874 return VERR_INVALID_CPU_ID;
1875 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
1876 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1877 break;
1878
1879 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1880 if (idCpu == NIL_VMCPUID)
1881 return VERR_INVALID_CPU_ID;
1882 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, idCpu);
1883 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1884 break;
1885
1886 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1887 if (idCpu != 0)
1888 return VERR_INVALID_CPU_ID;
1889 rc = PGMR0PhysSetupIoMmu(pGVM);
1890 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1891 break;
1892
1893 case VMMR0_DO_PGM_POOL_GROW:
1894 if (idCpu == NIL_VMCPUID)
1895 return VERR_INVALID_CPU_ID;
1896 rc = PGMR0PoolGrow(pGVM);
1897 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1898 break;
1899
1900 /*
1901 * GMM wrappers.
1902 */
1903 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1904 if (u64Arg)
1905 return VERR_INVALID_PARAMETER;
1906 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1907 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1908 break;
1909
1910 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1911 if (u64Arg)
1912 return VERR_INVALID_PARAMETER;
1913 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1914 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1915 break;
1916
1917 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1918 if (u64Arg)
1919 return VERR_INVALID_PARAMETER;
1920 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1921 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1922 break;
1923
1924 case VMMR0_DO_GMM_FREE_PAGES:
1925 if (u64Arg)
1926 return VERR_INVALID_PARAMETER;
1927 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1928 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1929 break;
1930
1931 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1932 if (u64Arg)
1933 return VERR_INVALID_PARAMETER;
1934 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1935 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1936 break;
1937
1938 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1939 if (u64Arg)
1940 return VERR_INVALID_PARAMETER;
1941 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1942 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1943 break;
1944
1945 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1946 if (idCpu == NIL_VMCPUID)
1947 return VERR_INVALID_CPU_ID;
1948 if (u64Arg)
1949 return VERR_INVALID_PARAMETER;
1950 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1951 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1952 break;
1953
1954 case VMMR0_DO_GMM_BALLOONED_PAGES:
1955 if (u64Arg)
1956 return VERR_INVALID_PARAMETER;
1957 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1958 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1959 break;
1960
1961 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1962 if (u64Arg)
1963 return VERR_INVALID_PARAMETER;
1964 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1965 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1966 break;
1967
1968 case VMMR0_DO_GMM_SEED_CHUNK:
1969 if (pReqHdr)
1970 return VERR_INVALID_PARAMETER;
1971 rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
1972 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1973 break;
1974
1975 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1976 if (idCpu == NIL_VMCPUID)
1977 return VERR_INVALID_CPU_ID;
1978 if (u64Arg)
1979 return VERR_INVALID_PARAMETER;
1980 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1981 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1982 break;
1983
1984 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1985 if (idCpu == NIL_VMCPUID)
1986 return VERR_INVALID_CPU_ID;
1987 if (u64Arg)
1988 return VERR_INVALID_PARAMETER;
1989 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1990 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1991 break;
1992
1993 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1994 if (idCpu == NIL_VMCPUID)
1995 return VERR_INVALID_CPU_ID;
1996 if ( u64Arg
1997 || pReqHdr)
1998 return VERR_INVALID_PARAMETER;
1999 rc = GMMR0ResetSharedModules(pGVM, idCpu);
2000 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2001 break;
2002
2003#ifdef VBOX_WITH_PAGE_SHARING
2004 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
2005 {
2006 if (idCpu == NIL_VMCPUID)
2007 return VERR_INVALID_CPU_ID;
2008 if ( u64Arg
2009 || pReqHdr)
2010 return VERR_INVALID_PARAMETER;
2011 rc = GMMR0CheckSharedModules(pGVM, idCpu);
2012 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2013 break;
2014 }
2015#endif
2016
2017#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
2018 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
2019 if (u64Arg)
2020 return VERR_INVALID_PARAMETER;
2021 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
2022 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2023 break;
2024#endif
2025
2026 case VMMR0_DO_GMM_QUERY_STATISTICS:
2027 if (u64Arg)
2028 return VERR_INVALID_PARAMETER;
2029 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
2030 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2031 break;
2032
2033 case VMMR0_DO_GMM_RESET_STATISTICS:
2034 if (u64Arg)
2035 return VERR_INVALID_PARAMETER;
2036 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
2037 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2038 break;
2039
2040 /*
2041 * A quick GCFGM mock-up.
2042 */
2043 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
2044 case VMMR0_DO_GCFGM_SET_VALUE:
2045 case VMMR0_DO_GCFGM_QUERY_VALUE:
2046 {
2047 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2048 return VERR_INVALID_PARAMETER;
2049 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
2050 if (pReq->Hdr.cbReq != sizeof(*pReq))
2051 return VERR_INVALID_PARAMETER;
2052 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
2053 {
2054 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2055 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2056 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2057 }
2058 else
2059 {
2060 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2061 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2062 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2063 }
2064 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2065 break;
2066 }
2067
2068 /*
2069 * PDM Wrappers.
2070 */
2071 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2072 {
2073 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2074 return VERR_INVALID_PARAMETER;
2075 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2076 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2077 break;
2078 }
2079
2080 case VMMR0_DO_PDM_DEVICE_CREATE:
2081 {
2082 if (!pReqHdr || u64Arg || idCpu != 0)
2083 return VERR_INVALID_PARAMETER;
2084 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
2085 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2086 break;
2087 }
2088
2089 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2090 {
2091 if (!pReqHdr || u64Arg)
2092 return VERR_INVALID_PARAMETER;
2093 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2094 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2095 break;
2096 }
2097
2098 /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2099 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2100 {
2101 if (!pReqHdr || u64Arg || idCpu != 0)
2102 return VERR_INVALID_PARAMETER;
2103 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2104 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2105 break;
2106 }
2107
2108 /*
2109 * Requests to the internal networking service.
2110 */
2111 case VMMR0_DO_INTNET_OPEN:
2112 {
2113 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2114 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2115 return VERR_INVALID_PARAMETER;
2116 rc = IntNetR0OpenReq(pSession, pReq);
2117 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2118 break;
2119 }
2120
2121 case VMMR0_DO_INTNET_IF_CLOSE:
2122 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2123 return VERR_INVALID_PARAMETER;
2124 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2125 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2126 break;
2127
2128
2129 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2130 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2131 return VERR_INVALID_PARAMETER;
2132 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2133 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2134 break;
2135
2136 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2137 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2138 return VERR_INVALID_PARAMETER;
2139 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2140 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2141 break;
2142
2143 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2144 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2145 return VERR_INVALID_PARAMETER;
2146 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2147 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2148 break;
2149
2150 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2151 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2152 return VERR_INVALID_PARAMETER;
2153 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2154 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2155 break;
2156
2157 case VMMR0_DO_INTNET_IF_SEND:
2158 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2159 return VERR_INVALID_PARAMETER;
2160 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2161 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2162 break;
2163
2164 case VMMR0_DO_INTNET_IF_WAIT:
2165 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2166 return VERR_INVALID_PARAMETER;
2167 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2168 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2169 break;
2170
2171 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2172 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2173 return VERR_INVALID_PARAMETER;
2174 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2175 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2176 break;
2177
2178#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2179 /*
2180 * Requests to the host PCI driver service.
2181 */
2182 case VMMR0_DO_PCIRAW_REQ:
2183 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2184 return VERR_INVALID_PARAMETER;
2185 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2186 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2187 break;
2188#endif
2189
2190 /*
2191 * NEM requests.
2192 */
2193#ifdef VBOX_WITH_NEM_R0
2194# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2195 case VMMR0_DO_NEM_INIT_VM:
2196 if (u64Arg || pReqHdr || idCpu != 0)
2197 return VERR_INVALID_PARAMETER;
2198 rc = NEMR0InitVM(pGVM);
2199 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2200 break;
2201
2202 case VMMR0_DO_NEM_INIT_VM_PART_2:
2203 if (u64Arg || pReqHdr || idCpu != 0)
2204 return VERR_INVALID_PARAMETER;
2205 rc = NEMR0InitVMPart2(pGVM);
2206 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2207 break;
2208
2209 case VMMR0_DO_NEM_MAP_PAGES:
2210 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2211 return VERR_INVALID_PARAMETER;
2212 rc = NEMR0MapPages(pGVM, idCpu);
2213 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2214 break;
2215
2216 case VMMR0_DO_NEM_UNMAP_PAGES:
2217 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2218 return VERR_INVALID_PARAMETER;
2219 rc = NEMR0UnmapPages(pGVM, idCpu);
2220 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2221 break;
2222
2223 case VMMR0_DO_NEM_EXPORT_STATE:
2224 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2225 return VERR_INVALID_PARAMETER;
2226 rc = NEMR0ExportState(pGVM, idCpu);
2227 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2228 break;
2229
2230 case VMMR0_DO_NEM_IMPORT_STATE:
2231 if (pReqHdr || idCpu == NIL_VMCPUID)
2232 return VERR_INVALID_PARAMETER;
2233 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2234 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2235 break;
2236
2237 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2238 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2239 return VERR_INVALID_PARAMETER;
2240 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2241 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2242 break;
2243
2244 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2245 if (pReqHdr || idCpu == NIL_VMCPUID)
2246 return VERR_INVALID_PARAMETER;
2247 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2248 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2249 break;
2250
2251 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2252 if (u64Arg || pReqHdr)
2253 return VERR_INVALID_PARAMETER;
2254 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2255 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2256 break;
2257
2258# if 1 && defined(DEBUG_bird)
2259 case VMMR0_DO_NEM_EXPERIMENT:
2260 if (pReqHdr)
2261 return VERR_INVALID_PARAMETER;
2262 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2263 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2264 break;
2265# endif
2266# endif
2267#endif
2268
2269 /*
2270 * IOM requests.
2271 */
2272 case VMMR0_DO_IOM_GROW_IO_PORTS:
2273 {
2274 if (pReqHdr || idCpu != 0)
2275 return VERR_INVALID_PARAMETER;
2276 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2277 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2278 break;
2279 }
2280
2281 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2282 {
2283 if (pReqHdr || idCpu != 0)
2284 return VERR_INVALID_PARAMETER;
2285 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2286 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2287 break;
2288 }
2289
2290 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2291 {
2292 if (pReqHdr || idCpu != 0)
2293 return VERR_INVALID_PARAMETER;
2294 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2295 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2296 break;
2297 }
2298
2299 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2300 {
2301 if (pReqHdr || idCpu != 0)
2302 return VERR_INVALID_PARAMETER;
2303 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2304 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2305 break;
2306 }
2307
2308 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2309 {
2310 if (pReqHdr || idCpu != 0)
2311 return VERR_INVALID_PARAMETER;
2312 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2313 if (RT_SUCCESS(rc))
2314 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2315 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2316 break;
2317 }
2318
2319 /*
2320 * DBGF requests.
2321 */
2322#ifdef VBOX_WITH_DBGF_TRACING
2323 case VMMR0_DO_DBGF_TRACER_CREATE:
2324 {
2325 if (!pReqHdr || u64Arg || idCpu != 0)
2326 return VERR_INVALID_PARAMETER;
2327 rc = DBGFR0TracerCreateReqHandler(pGVM, (PDBGFTRACERCREATEREQ)pReqHdr);
2328 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2329 break;
2330 }
2331
2332 case VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER:
2333 {
2334 if (!pReqHdr || u64Arg)
2335 return VERR_INVALID_PARAMETER;
2336# if 0 /** @todo */
2337 rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
2338# else
2339 rc = VERR_NOT_IMPLEMENTED;
2340# endif
2341 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2342 break;
2343 }
2344#endif
2345
2346 case VMMR0_DO_DBGF_BP_INIT:
2347 {
2348 if (!pReqHdr || u64Arg || idCpu != 0)
2349 return VERR_INVALID_PARAMETER;
2350 rc = DBGFR0BpInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2351 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2352 break;
2353 }
2354
2355 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2356 {
2357 if (!pReqHdr || u64Arg || idCpu != 0)
2358 return VERR_INVALID_PARAMETER;
2359 rc = DBGFR0BpChunkAllocReqHandler(pGVM, (PDBGFBPCHUNKALLOCREQ)pReqHdr);
2360 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2361 break;
2362 }
2363
2364 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2365 {
2366 if (!pReqHdr || u64Arg || idCpu != 0)
2367 return VERR_INVALID_PARAMETER;
2368 rc = DBGFR0BpL2TblChunkAllocReqHandler(pGVM, (PDBGFBPL2TBLCHUNKALLOCREQ)pReqHdr);
2369 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2370 break;
2371 }
2372
2373 case VMMR0_DO_DBGF_BP_OWNER_INIT:
2374 {
2375 if (!pReqHdr || u64Arg || idCpu != 0)
2376 return VERR_INVALID_PARAMETER;
2377 rc = DBGFR0BpOwnerInitReqHandler(pGVM, (PDBGFBPOWNERINITREQ)pReqHdr);
2378 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2379 break;
2380 }
2381
2382 case VMMR0_DO_DBGF_BP_PORTIO_INIT:
2383 {
2384 if (!pReqHdr || u64Arg || idCpu != 0)
2385 return VERR_INVALID_PARAMETER;
2386 rc = DBGFR0BpPortIoInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2387 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2388 break;
2389 }
2390
2391
2392 /*
2393 * TM requests.
2394 */
2395 case VMMR0_DO_TM_GROW_TIMER_QUEUE:
2396 {
2397 if (pReqHdr || idCpu == NIL_VMCPUID)
2398 return VERR_INVALID_PARAMETER;
2399 rc = TMR0TimerQueueGrow(pGVM, RT_HI_U32(u64Arg), RT_LO_U32(u64Arg));
2400 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2401 break;
2402 }
2403
2404 /*
2405 * For profiling.
2406 */
2407 case VMMR0_DO_NOP:
2408 case VMMR0_DO_SLOW_NOP:
2409 return VINF_SUCCESS;
2410
2411 /*
2412 * For testing Ring-0 APIs invoked in this environment.
2413 */
2414 case VMMR0_DO_TESTS:
2415 /** @todo make new test */
2416 return VINF_SUCCESS;
2417
2418 default:
2419 /*
2420 * We're returning VERR_NOT_SUPPORTED here so we've got something other
2421 * than -1, which the interrupt gate glue code might return.
2422 */
2423 Log(("operation %#x is not supported\n", enmOperation));
2424 return VERR_NOT_SUPPORTED;
2425 }
2426 return rc;
2427}
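/*
 * Illustrative only, not part of the build: the canonical shape of a new
 * case in the switch above. The operation, request type and handler names
 * are hypothetical; the argument validation and SMAP check mirror the real
 * cases.
 */
#if 0
        case VMMR0_DO_EXAMPLE: /* hypothetical */
            if (u64Arg || !pReqHdr || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            rc = ExampleR0ReqHandler(pGVM, (PEXAMPLEREQ)pReqHdr); /* hypothetical */
            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
            break;
#endif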
2428
2429
2430/**
2431 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2432 */
2433typedef struct VMMR0ENTRYEXARGS
2434{
2435 PGVM pGVM;
2436 VMCPUID idCpu;
2437 VMMR0OPERATION enmOperation;
2438 PSUPVMMR0REQHDR pReq;
2439 uint64_t u64Arg;
2440 PSUPDRVSESSION pSession;
2441} VMMR0ENTRYEXARGS;
2442/** Pointer to a vmmR0EntryExWrapper argument package. */
2443typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
2444
2445/**
2446 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2447 *
2448 * @returns VBox status code.
2449 * @param pvArgs The argument package.
2450 */
2451static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2452{
2453 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2454 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2455 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2456 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2457 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2458 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2459}
2460
2461
2462/**
2463 * The Ring 0 entry point, called by the support library (SUP).
2464 *
2465 * @returns VBox status code.
2466 * @param pGVM The global (ring-0) VM structure.
2467 * @param pVM The cross context VM structure.
2468 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2469 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2470 * @param enmOperation Which operation to execute.
2471 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2472 * @param u64Arg Some simple constant argument.
2473 * @param pSession The session of the caller.
2474 * @remarks Assume called with interrupts _enabled_.
2475 */
2476VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2477 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2478{
2479 /*
2480 * Requests that should only happen on the EMT thread will be
2481 * wrapped in a setjmp so we can assert without causing trouble.
2482 */
2483 if ( pVM != NULL
2484 && pGVM != NULL
2485 && pVM == pGVM /** @todo drop pVM or pGVM */
2486 && idCpu < pGVM->cCpus
2487 && pGVM->pSession == pSession
2488 && pGVM->pSelf == pVM)
2489 {
2490 switch (enmOperation)
2491 {
2492 /* These might/will be called before VMMR3Init. */
2493 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2494 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2495 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2496 case VMMR0_DO_GMM_FREE_PAGES:
2497 case VMMR0_DO_GMM_BALLOONED_PAGES:
2498 /* On the Mac we might not have a valid jmp buf, so check these as well. */
2499 case VMMR0_DO_VMMR0_INIT:
2500 case VMMR0_DO_VMMR0_TERM:
2501
2502 case VMMR0_DO_PDM_DEVICE_CREATE:
2503 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2504 case VMMR0_DO_IOM_GROW_IO_PORTS:
2505 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2506 case VMMR0_DO_DBGF_BP_INIT:
2507 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2508 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2509 {
2510 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2511 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2512 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2513 && pGVCpu->hNativeThreadR0 == hNativeThread))
2514 {
2515 if (!pGVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2516 break;
2517
2518 /** @todo validate this EMT claim... GVM knows. */
2519 VMMR0ENTRYEXARGS Args;
2520 Args.pGVM = pGVM;
2521 Args.idCpu = idCpu;
2522 Args.enmOperation = enmOperation;
2523 Args.pReq = pReq;
2524 Args.u64Arg = u64Arg;
2525 Args.pSession = pSession;
2526 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2527 }
2528 return VERR_VM_THREAD_NOT_EMT;
2529 }
2530
2531 default:
2532 case VMMR0_DO_PGM_POOL_GROW:
2533 break;
2534 }
2535 }
2536 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2537}
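/*
 * Illustrative only, not part of the build: ring-3 reaches this entry point
 * through the support library. Assuming SUPR3CallVMMR0Ex and a ring-0 VM
 * address pVMR0 known to the caller (both assumptions for this sketch), a
 * no-op round trip looks roughly like:
 */
#if 0
    int rc = SUPR3CallVMMR0Ex(pVMR0, idCpu, VMMR0_DO_NOP, 0 /*u64Arg*/, NULL /*pReqHdr*/);
#endif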
2538
2539
2540/**
2541 * Checks whether we've armed the ring-0 long jump machinery.
2542 *
2543 * @returns @c true / @c false
2544 * @param pVCpu The cross context virtual CPU structure.
2545 * @thread EMT
2546 * @sa VMMIsLongJumpArmed
2547 */
2548VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2549{
2550#ifdef RT_ARCH_X86
2551 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2552 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2553#else
2554 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2555 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2556#endif
2557}
2558
2559
2560/**
2561 * Checks whether we've done a ring-3 long jump.
2562 *
2563 * @returns @c true / @c false
2564 * @param pVCpu The cross context virtual CPU structure.
2565 * @thread EMT
2566 */
2567VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2568{
2569 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2570}
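/*
 * Illustrative only, not part of the build: a typical use of the predicate
 * above on the EMT, asserting that a ring-3 detour is possible before code
 * relies on VMMRZCallRing3 (compare the check in vmmR0LoggerFlush below).
 */
#if 0
    Assert(VMMR0IsLongJumpArmed(pVCpu));
#endif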
2571
2572
2573/**
2574 * Internal R0 logger worker: Flush logger.
2575 *
2576 * @param pLogger The logger instance to flush.
2577 * @remark This function must be exported!
2578 */
2579VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2580{
2581#ifdef LOG_ENABLED
2582 /*
2583 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2584 * (This code is deliberately a bit paranoid.)
2585 */
2586 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_UOFFSETOF(VMMR0LOGGER, Logger));
2587 if ( !VALID_PTR(pR0Logger)
2588 || !VALID_PTR(pR0Logger + 1)
2589 || pLogger->u32Magic != RTLOGGER_MAGIC)
2590 {
2591# ifdef DEBUG
2592 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2593# endif
2594 return;
2595 }
2596 if (pR0Logger->fFlushingDisabled)
2597 return; /* quietly */
2598
2599 PVMCC pVM = pR0Logger->pVM;
2600 if ( !VALID_PTR(pVM)
2601 || pVM->pSelf != pVM)
2602 {
2603# ifdef DEBUG
2604 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pSelf=%p! pLogger=%p\n", pVM, pVM->pSelf, pLogger);
2605# endif
2606 return;
2607 }
2608
2609 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2610 if (pVCpu)
2611 {
2612 /*
2613 * Check that the jump buffer is armed.
2614 */
2615# ifdef RT_ARCH_X86
2616 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2617 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2618# else
2619 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2620 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2621# endif
2622 {
2623# ifdef DEBUG
2624 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2625# endif
2626 return;
2627 }
2628 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2629 }
2630# ifdef DEBUG
2631 else
2632 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2633# endif
2634#else
2635 NOREF(pLogger);
2636#endif /* LOG_ENABLED */
2637}
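/*
 * Illustrative only, not part of the build: the pointer arithmetic above is
 * the classic container-of pattern, recovering the wrapper structure from a
 * pointer to an embedded member. In general form (names hypothetical):
 */
#if 0
    pWrapper = (WRAPPERTYPE *)((uintptr_t)pMember - RT_UOFFSETOF(WRAPPERTYPE, Member));
#endif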
2638
2639#ifdef LOG_ENABLED
2640
2641/**
2642 * Disables flushing of the ring-0 debug log.
2643 *
2644 * @param pVCpu The cross context virtual CPU structure.
2645 */
2646VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPUCC pVCpu)
2647{
2648 if (pVCpu->vmm.s.pR0LoggerR0)
2649 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2650 if (pVCpu->vmm.s.pR0RelLoggerR0)
2651 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = true;
2652}
2653
2654
2655/**
2656 * Enables flushing of the ring-0 debug log.
2657 *
2658 * @param pVCpu The cross context virtual CPU structure.
2659 */
2660VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPUCC pVCpu)
2661{
2662 if (pVCpu->vmm.s.pR0LoggerR0)
2663 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2664 if (pVCpu->vmm.s.pR0RelLoggerR0)
2665 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = false;
2666}
2667
2668
2669/**
2670 * Checks if log flushing is disabled or not.
2671 * @returns @c true if flushing is disabled, @c false if it is enabled.
2672 * @param pVCpu The cross context virtual CPU structure.
2673 */
2674VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPUCC pVCpu)
2675{
2676 if (pVCpu->vmm.s.pR0LoggerR0)
2677 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2678 if (pVCpu->vmm.s.pR0RelLoggerR0)
2679 return pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled;
2680 return true;
2681}
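/*
 * Illustrative only, not part of the build: the disable/enable pair above is
 * meant to bracket ring-0 code that must not be interrupted by a ring-3
 * logger-flush detour; a sketch of the discipline:
 */
#if 0
    VMMR0LogFlushDisable(pVCpu);
    /* ... code that cannot tolerate a VMMRZCallRing3 round trip ... */
    VMMR0LogFlushEnable(pVCpu);
#endif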
2682
2683#endif /* LOG_ENABLED */
2684
2685/*
2686 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
2687 */
2688DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
2689{
2690 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
2691 if (pGVCpu)
2692 {
2693 PVMCPUCC pVCpu = pGVCpu;
2694 if (RT_VALID_PTR(pVCpu))
2695 {
2696 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR0;
2697 if (RT_VALID_PTR(pVmmLogger))
2698 {
2699 if ( pVmmLogger->fCreated
2700 && pVmmLogger->pVM == pGVCpu->pGVM)
2701 {
2702 if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
2703 return NULL;
2704 uint16_t const fFlags = RT_LO_U16(fFlagsAndGroup);
2705 uint16_t const iGroup = RT_HI_U16(fFlagsAndGroup);
2706 if ( iGroup != UINT16_MAX
2707 && ( ( pVmmLogger->Logger.afGroups[iGroup < pVmmLogger->Logger.cGroups ? iGroup : 0]
2708 & (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED))
2709 != (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED)))
2710 return NULL;
2711 return &pVmmLogger->Logger;
2712 }
2713 }
2714 }
2715 }
2716 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
2717}
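/*
 * Illustrative only, not part of the build: fFlagsAndGroup packs the group
 * flags in the low 16 bits and the group index in the high 16 bits (see the
 * RT_LO_U16/RT_HI_U16 split above), so asking "is level-1 VMM release
 * logging enabled?" looks roughly like this:
 */
#if 0
    PRTLOGGER pLogger = RTLogRelGetDefaultInstanceEx(RT_MAKE_U32(RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP_VMM));
    if (pLogger)
    { /* enabled - safe to log at this level and group */ }
#endif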
2718
2719
2720/*
2721 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2722 *
2723 * @returns true if the breakpoint should be hit, false if it should be ignored.
2724 */
2725DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2726{
2727#if 0
2728 return true;
2729#else
2730 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2731 if (pVM)
2732 {
2733 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2734
2735 if (pVCpu)
2736 {
2737# ifdef RT_ARCH_X86
2738 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2739 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2740# else
2741 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2742 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2743# endif
2744 {
2745 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2746 return RT_FAILURE_NP(rc);
2747 }
2748 }
2749 }
2750# ifdef RT_OS_LINUX
2751 return true;
2752# else
2753 return false;
2754# endif
2755#endif
2756}
2757
2758
2759/*
2760 * Override this so we can push it up to ring-3.
2761 */
2762DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2763{
2764 /*
2765 * To the log.
2766 */
2767 LogAlways(("\n!!R0-Assertion Failed!!\n"
2768 "Expression: %s\n"
2769 "Location : %s(%d) %s\n",
2770 pszExpr, pszFile, uLine, pszFunction));
2771
2772 /*
2773 * To the global VMM buffer.
2774 */
2775 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2776 if (pVM)
2777 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2778 "\n!!R0-Assertion Failed!!\n"
2779 "Expression: %.*s\n"
2780 "Location : %s(%d) %s\n",
2781 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2782 pszFile, uLine, pszFunction);
2783
2784 /*
2785 * Continue the normal way.
2786 */
2787 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2788}
2789
2790
2791/**
2792 * Callback for RTLogFormatV which writes to the ring-3 log port.
2793 * See PFNLOGOUTPUT() for details.
2794 */
2795static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2796{
2797 for (size_t i = 0; i < cbChars; i++)
2798 {
2799 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2800 }
2801
2802 NOREF(pv);
2803 return cbChars;
2804}
2805
2806
2807/*
2808 * Override this so we can push it up to ring-3.
2809 */
2810DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2811{
2812 va_list vaCopy;
2813
2814 /*
2815 * Push the message to the loggers.
2816 */
2817 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2818 if (pLog)
2819 {
2820 va_copy(vaCopy, va);
2821 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2822 va_end(vaCopy);
2823 }
2824 pLog = RTLogRelGetDefaultInstance();
2825 if (pLog)
2826 {
2827 va_copy(vaCopy, va);
2828 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2829 va_end(vaCopy);
2830 }
2831
2832 /*
2833 * Push it to the global VMM buffer.
2834 */
2835 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2836 if (pVM)
2837 {
2838 va_copy(vaCopy, va);
2839 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2840 va_end(vaCopy);
2841 }
2842
2843 /*
2844 * Continue the normal way.
2845 */
2846 RTAssertMsg2V(pszFormat, va);
2847}
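/*
 * Illustrative only, not part of the build: each consumer above gets its own
 * va_copy because a va_list may only be traversed once; the pattern is:
 */
#if 0
    va_list vaCopy;
    va_copy(vaCopy, va);
    RTStrPrintfV(szBuf, sizeof(szBuf), pszFormat, vaCopy); /* szBuf is hypothetical */
    va_end(vaCopy);
#endif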
2848