VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@88344

Last change on this file since 88344 was 88344, checked in by vboxsync, 4 years ago

Forward ported r143567 from 6.1: vmmR0DoHalt fix - need to re-check FFs after changing the state. oem2ticketref:40

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 100.4 KB
1/* $Id: VMMR0.cpp 88344 2021-04-01 11:25:37Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pgm.h>
30#ifdef VBOX_WITH_NEM_R0
31# include <VBox/vmm/nem.h>
32#endif
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/stam.h>
35#include <VBox/vmm/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvm.h>
39#ifdef VBOX_WITH_PCI_PASSTHROUGH
40# include <VBox/vmm/pdmpci.h>
41#endif
42#include <VBox/vmm/apic.h>
43
44#include <VBox/vmm/gvmm.h>
45#include <VBox/vmm/gmm.h>
46#include <VBox/vmm/gim.h>
47#include <VBox/intnet.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/param.h>
50#include <VBox/err.h>
51#include <VBox/version.h>
52#include <VBox/log.h>
53
54#include <iprt/asm-amd64-x86.h>
55#include <iprt/assert.h>
56#include <iprt/crc.h>
57#include <iprt/mp.h>
58#include <iprt/once.h>
59#include <iprt/stdarg.h>
60#include <iprt/string.h>
61#include <iprt/thread.h>
62#include <iprt/timer.h>
63#include <iprt/time.h>
64
65#include "dtrace/VBoxVMM.h"
66
67
68#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
69# pragma intrinsic(_AddressOfReturnAddress)
70#endif
71
72#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
73# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
74#endif
75
76
77
78/*********************************************************************************************************************************
79* Defined Constants And Macros *
80*********************************************************************************************************************************/
81/** @def VMM_CHECK_SMAP_SETUP
82 * SMAP check setup. */
83/** @def VMM_CHECK_SMAP_CHECK
84 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
85 * it will be logged and @a a_BadExpr is executed. */
86/** @def VMM_CHECK_SMAP_CHECK2
87 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
88 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
89 * executed. */
90#if (defined(VBOX_STRICT) || 1) && !defined(VBOX_WITH_RAM_IN_KERNEL)
91# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
92# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
93 do { \
94 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
95 { \
96 RTCCUINTREG fEflCheck = ASMGetFlags(); \
97 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
98 { /* likely */ } \
99 else \
100 { \
101 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
102 a_BadExpr; \
103 } \
104 } \
105 } while (0)
106# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
107 do { \
108 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
109 { \
110 RTCCUINTREG fEflCheck = ASMGetFlags(); \
111 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
112 { /* likely */ } \
113 else if (a_pGVM) \
114 { \
115 SUPR0BadContext((a_pGVM)->pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
116 RTStrPrintf((a_pGVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pGVM)->vmm.s.szRing0AssertMsg1), \
117 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
118 a_BadExpr; \
119 } \
120 else \
121 { \
122 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
123 a_BadExpr; \
124 } \
125 } \
126 } while (0)
127#else
128# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
129# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
130# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures)
131#endif
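/** @remarks A minimal usage sketch of the SMAP checking macros above, following
 *           the pattern ModuleInit and vmmR0InitVM use below (the worker name
 *           is hypothetical):
 * @code
 *     static int vmmR0SomeWorker(PGVM pGVM)
 *     {
 *         VMM_CHECK_SMAP_SETUP();                                  // caches SUPR0GetKernelFeatures()
 *         VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR); // plain log + bail out
 *         // ... work that must not run with EFLAGS.AC clear ...
 *         VMM_CHECK_SMAP_CHECK2(pGVM, return VERR_VMM_SMAP_BUT_AC_CLEAR); // also records assertion text
 *         return VINF_SUCCESS;
 *     }
 * @endcode
 */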
132
133
134/*********************************************************************************************************************************
135* Internal Functions *
136*********************************************************************************************************************************/
137RT_C_DECLS_BEGIN
138#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
139extern uint64_t __udivdi3(uint64_t, uint64_t);
140extern uint64_t __umoddi3(uint64_t, uint64_t);
141#endif
142RT_C_DECLS_END
143
144
145/*********************************************************************************************************************************
146* Global Variables *
147*********************************************************************************************************************************/
148/** Drag in necessary library bits.
149 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
150struct CLANG11WEIRDNOTHROW { PFNRT pfn; } g_VMMR0Deps[] =
151{
152 { (PFNRT)RTCrc32 },
153 { (PFNRT)RTOnce },
154#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
155 { (PFNRT)__udivdi3 },
156 { (PFNRT)__umoddi3 },
157#endif
158 { NULL }
159};
160
161#ifdef RT_OS_SOLARIS
162/* Dependency information for the native Solaris loader. */
163extern "C" { char _depends_on[] = "vboxdrv"; }
164#endif
165
166
167/**
168 * Initialize the module.
169 * This is called when we're first loaded.
170 *
171 * @returns 0 on success.
172 * @returns VBox status code on failure.
173 * @param hMod Image handle for use in APIs.
174 */
175DECLEXPORT(int) ModuleInit(void *hMod)
176{
177 VMM_CHECK_SMAP_SETUP();
178 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
179
180#ifdef VBOX_WITH_DTRACE_R0
181 /*
182 * The first thing to do is register the static tracepoints.
183 * (Deregistration is automatic.)
184 */
185 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
186 if (RT_FAILURE(rc2))
187 return rc2;
188#endif
189 LogFlow(("ModuleInit:\n"));
190
191#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
192 /*
193 * Display the CMOS debug code.
194 */
195 ASMOutU8(0x72, 0x03);
196 uint8_t bDebugCode = ASMInU8(0x73);
197 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
198 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
199#endif
200
201 /*
202 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
203 */
204 int rc = vmmInitFormatTypes();
205 if (RT_SUCCESS(rc))
206 {
207 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
208 rc = GVMMR0Init();
209 if (RT_SUCCESS(rc))
210 {
211 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
212 rc = GMMR0Init();
213 if (RT_SUCCESS(rc))
214 {
215 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
216 rc = HMR0Init();
217 if (RT_SUCCESS(rc))
218 {
219 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
220
221 PDMR0Init(hMod);
222 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
223
224 rc = PGMRegisterStringFormatTypes();
225 if (RT_SUCCESS(rc))
226 {
227 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
228#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
229 rc = PGMR0DynMapInit();
230#endif
231 if (RT_SUCCESS(rc))
232 {
233 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
234 rc = IntNetR0Init();
235 if (RT_SUCCESS(rc))
236 {
237#ifdef VBOX_WITH_PCI_PASSTHROUGH
238 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
239 rc = PciRawR0Init();
240#endif
241 if (RT_SUCCESS(rc))
242 {
243 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
244 rc = CPUMR0ModuleInit();
245 if (RT_SUCCESS(rc))
246 {
247#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
248 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
249 rc = vmmR0TripleFaultHackInit();
250 if (RT_SUCCESS(rc))
251#endif
252 {
253 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
254 if (RT_SUCCESS(rc))
255 {
256 LogFlow(("ModuleInit: returns success\n"));
257 return VINF_SUCCESS;
258 }
259 }
260
261 /*
262 * Bail out.
263 */
264#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
265 vmmR0TripleFaultHackTerm();
266#endif
267 }
268 else
269 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
270#ifdef VBOX_WITH_PCI_PASSTHROUGH
271 PciRawR0Term();
272#endif
273 }
274 else
275 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
276 IntNetR0Term();
277 }
278 else
279 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
280#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
281 PGMR0DynMapTerm();
282#endif
283 }
284 else
285 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
286 PGMDeregisterStringFormatTypes();
287 }
288 else
289 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
290 HMR0Term();
291 }
292 else
293 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
294 GMMR0Term();
295 }
296 else
297 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
298 GVMMR0Term();
299 }
300 else
301 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
302 vmmTermFormatTypes();
303 }
304 else
305 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
306
307 LogFlow(("ModuleInit: failed %Rrc\n", rc));
308 return rc;
309}
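/* The nested if/else cascade in ModuleInit above is the usual goto-free
 * "unwind in reverse order" idiom: each subsystem that initialized successfully
 * is terminated again when a later one fails. A minimal sketch of the shape,
 * with hypothetical SubsysAInit/SubsysBInit and their Term counterparts:
 *
 *     int rc = SubsysAInit();
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = SubsysBInit();
 *         if (RT_SUCCESS(rc))
 *             return VINF_SUCCESS;   // everything is up
 *         SubsysATerm();             // B failed, so tear A down again
 *     }
 *     return rc;                     // propagate the first failure
 */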
310
311
312/**
313 * Terminate the module.
314 * This is called when we're finally unloaded.
315 *
316 * @param hMod Image handle for use in APIs.
317 */
318DECLEXPORT(void) ModuleTerm(void *hMod)
319{
320 NOREF(hMod);
321 LogFlow(("ModuleTerm:\n"));
322
323 /*
324 * Terminate the CPUM module (Local APIC cleanup).
325 */
326 CPUMR0ModuleTerm();
327
328 /*
329 * Terminate the internal network service.
330 */
331 IntNetR0Term();
332
333 /*
334 * PGM (Darwin), HM and PciRaw global cleanup.
335 */
336#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
337 PGMR0DynMapTerm();
338#endif
339#ifdef VBOX_WITH_PCI_PASSTHROUGH
340 PciRawR0Term();
341#endif
342 PGMDeregisterStringFormatTypes();
343 HMR0Term();
344#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
345 vmmR0TripleFaultHackTerm();
346#endif
347
348 /*
349 * Destroy the GMM and GVMM instances.
350 */
351 GMMR0Term();
352 GVMMR0Term();
353
354 vmmTermFormatTypes();
355
356 LogFlow(("ModuleTerm: returns\n"));
357}
358
359
360/**
361 * Initializes the R0 driver for a particular VM instance.
362 *
363 * @returns VBox status code.
364 *
365 * @param pGVM The global (ring-0) VM structure.
366 * @param uSvnRev The SVN revision of the ring-3 part.
367 * @param uBuildType Build type indicator.
368 * @thread EMT(0)
369 */
370static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
371{
372 VMM_CHECK_SMAP_SETUP();
373 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
374
375 /*
376 * Match the SVN revisions and build type.
377 */
378 if (uSvnRev != VMMGetSvnRev())
379 {
380 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
381 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
382 return VERR_VMM_R0_VERSION_MISMATCH;
383 }
384 if (uBuildType != vmmGetBuildType())
385 {
386 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
387 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
388 return VERR_VMM_R0_VERSION_MISMATCH;
389 }
390
391 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
392 if (RT_FAILURE(rc))
393 return rc;
394
395#ifdef LOG_ENABLED
396 /*
397 * Register the EMT R0 logger instance for VCPU 0.
398 */
399 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
400
401 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
402 if (pR0Logger)
403 {
404# if 0 /* testing of the logger. */
405 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
406 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
407 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
408 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
409
410 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
411 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
412 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
413 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
414
415 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
416 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
417 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
418 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
419
420 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
421 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
422 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
423 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
424 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
425 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
426
427 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
428 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
429
430 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
431 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
432 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
433# endif
434 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pGVM->pSession));
435 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
436 pR0Logger->fRegistered = true;
437 }
438#endif /* LOG_ENABLED */
439
440 /*
441 * Check if the host supports high resolution timers or not.
442 */
443 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
444 && !RTTimerCanDoHighResolution())
445 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
446
447 /*
448 * Initialize the per VM data for GVMM and GMM.
449 */
450 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
451 rc = GVMMR0InitVM(pGVM);
452 if (RT_SUCCESS(rc))
453 {
454 /*
455 * Init HM, CPUM and PGM (Darwin only).
456 */
457 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
458 rc = HMR0InitVM(pGVM);
459 if (RT_SUCCESS(rc))
460 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
461 if (RT_SUCCESS(rc))
462 {
463 rc = CPUMR0InitVM(pGVM);
464 if (RT_SUCCESS(rc))
465 {
466 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
467 rc = PGMR0InitVM(pGVM);
468 if (RT_SUCCESS(rc))
469 {
470 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
471 rc = EMR0InitVM(pGVM);
472 if (RT_SUCCESS(rc))
473 {
474 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
475#ifdef VBOX_WITH_PCI_PASSTHROUGH
476 rc = PciRawR0InitVM(pGVM);
477#endif
478 if (RT_SUCCESS(rc))
479 {
480 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
481 rc = GIMR0InitVM(pGVM);
482 if (RT_SUCCESS(rc))
483 {
484 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);
485 if (RT_SUCCESS(rc))
486 {
487 GVMMR0DoneInitVM(pGVM);
488
489 /*
490 * Collect a bit of info for the VM release log.
491 */
492 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
493 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
494
495 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
496 return rc;
497 }
498
499 /* Bail out. */
500 GIMR0TermVM(pGVM);
501 }
502#ifdef VBOX_WITH_PCI_PASSTHROUGH
503 PciRawR0TermVM(pGVM);
504#endif
505 }
506 }
507 }
508 }
509 HMR0TermVM(pGVM);
510 }
511 }
512
513 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
514 return rc;
515}
516
517
518/**
519 * Does EMT specific VM initialization.
520 *
521 * @returns VBox status code.
522 * @param pGVM The ring-0 VM structure.
523 * @param idCpu The EMT that's calling.
524 */
525static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
526{
527 /* Paranoia (caller checked these already). */
528 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
529 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
530
531#ifdef LOG_ENABLED
532 /*
533 * Registration of ring 0 loggers.
534 */
535 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
536 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
537 if ( pR0Logger
538 && !pR0Logger->fRegistered)
539 {
540 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
541 pR0Logger->fRegistered = true;
542 }
543#endif
544
545 return VINF_SUCCESS;
546}
547
548
549
550/**
551 * Terminates the R0 bits for a particular VM instance.
552 *
553 * This is normally called by ring-3 as part of the VM termination process, but
554 * may alternatively be called during the support driver session cleanup when
555 * the VM object is destroyed (see GVMM).
556 *
557 * @returns VBox status code.
558 *
559 * @param pGVM The global (ring-0) VM structure.
560 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
561 * thread.
562 * @thread EMT(0) or session clean up thread.
563 */
564VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
565{
566 /*
567 * Check EMT(0) claim if we're called from userland.
568 */
569 if (idCpu != NIL_VMCPUID)
570 {
571 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
572 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
573 if (RT_FAILURE(rc))
574 return rc;
575 }
576
577#ifdef VBOX_WITH_PCI_PASSTHROUGH
578 PciRawR0TermVM(pGVM);
579#endif
580
581 /*
582 * Tell GVMM what we're up to and check that we only do this once.
583 */
584 if (GVMMR0DoingTermVM(pGVM))
585 {
586 GIMR0TermVM(pGVM);
587
588 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
589 * here to make sure we don't leak any shared pages if we crash... */
590#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
591 PGMR0DynMapTermVM(pGVM);
592#endif
593 HMR0TermVM(pGVM);
594 }
595
596 /*
597 * Deregister the logger.
598 */
599 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
600 return VINF_SUCCESS;
601}
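/** @remarks Call sketch for the two supported contexts described in the
 *           parameter docs above:
 * @code
 *     VMMR0TermVM(pGVM, 0);            // from EMT(0) during normal VM termination
 *     VMMR0TermVM(pGVM, NIL_VMCPUID);  // from the support driver session cleanup thread
 * @endcode
 */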
602
603#if 0 /** @todo broken see oem2 ticket 40 */
604
605/**
606 * An interrupt or unhalt force flag is set, deal with it.
607 *
608 * @returns VINF_SUCCESS (or VINF_EM_HALT).
609 * @param pVCpu The cross context virtual CPU structure.
610 * @param uMWait Result from EMMonitorWaitIsActive().
611 * @param enmInterruptibility Guest CPU interruptibility level.
612 */
613static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
614{
615 Assert(!TRPMHasTrap(pVCpu));
616 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
617 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
618
619 /*
620 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
621 */
622 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
623 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
624 {
625 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
626 {
627 uint8_t u8Interrupt = 0;
628 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
629 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
630 if (RT_SUCCESS(rc))
631 {
632 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
633
634 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
635 AssertRCSuccess(rc);
636 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
637 return rc;
638 }
639 }
640 }
641 /*
642 * SMI is not implemented yet, at least not here.
643 */
644 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
645 {
646 return VINF_EM_HALT;
647 }
648 /*
649 * NMI.
650 */
651 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
652 {
653 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
654 {
655 /** @todo later. */
656 return VINF_EM_HALT;
657 }
658 }
659 /*
660 * Nested-guest virtual interrupt.
661 */
662 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
663 {
664 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
665 {
666 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
667 * here before injecting the virtual interrupt. See emR3ForcedActions
668 * for details. */
669 return VINF_EM_HALT;
670 }
671 }
672
673 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
674 {
675 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
676 return VINF_SUCCESS;
677 }
678 if (uMWait > 1)
679 {
680 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
681 return VINF_SUCCESS;
682 }
683
684 return VINF_EM_HALT;
685}
686
687
688/**
689 * This does one round of vmR3HaltGlobal1Halt().
690 *
691 * The rationale here is that we'll reduce latency in interrupt situations if we
692 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
693 * MWAIT), but do one round of blocking here instead and hope the interrupt is
694 * raised in the meanwhile.
695 *
696 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
697 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
698 * ring-0 call (unless we're too close to a timer event). When the interrupt
699 * wakes us up, we'll return from ring-0 and EM will by instinct do a
700 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
701 * back to VMMR0EntryFast().
702 *
703 * @returns VINF_SUCCESS or VINF_EM_HALT.
704 * @param pGVM The ring-0 VM structure.
705 * @param pGVCpu The ring-0 virtual CPU structure.
706 *
707 * @todo r=bird: All the blocking/waiting and EMT management should move out of
708 * the VM module, probably to VMM. Then this would be more weird wrt
709 * parameters and statistics.
710 */
711static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
712{
713 /*
714 * Do spin stat historization.
715 */
716 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
717 { /* likely */ }
718 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
719 {
720 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
721 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
722 }
723 else
724 {
725 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
726 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
727 }
728
729 /*
730 * Flags that make us go to ring-3.
731 */
732 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
733 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
734 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
735 | VM_FF_PGM_NO_MEMORY | VM_FF_DEBUG_SUSPEND;
736 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
737 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
738 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
739 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
740
741 /*
742 * Check preconditions.
743 */
744 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
745 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
746 if ( pGVCpu->vmm.s.fMayHaltInRing0
747 && !TRPMHasTrap(pGVCpu)
748 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
749 || uMWait > 1))
750 {
751 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
752 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
753 {
754 /*
755 * Interrupts pending already?
756 */
757 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
758 APICUpdatePendingInterrupts(pGVCpu);
759
760 /*
761 * Flags that wake up from the halted state.
762 */
763 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
764 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
765
766 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
767 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
768 ASMNopPause();
769
770 /*
771 * Check out how long till the next timer event.
772 */
773 uint64_t u64Delta;
774 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
775
776 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
777 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
778 {
779 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
780 APICUpdatePendingInterrupts(pGVCpu);
781
782 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
783 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
784
785 /*
786 * Wait if there is enough time to the next timer event.
787 */
788 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
789 {
790 /* If there are a few other CPU cores around, we will procrastinate a
791 little before going to sleep, hoping for some device raising an
792 interrupt or similar. Though, the best thing here would be to
793 dynamically adjust the spin count according to its usefulness or
794 something... */
795 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
796 && RTMpGetOnlineCount() >= 4)
797 {
798 /** @todo Figure out how we can skip this if it hasn't helped recently...
799 * @bugref{9172#c12} */
800 uint32_t cSpinLoops = 42;
801 while (cSpinLoops-- > 0)
802 {
803 ASMNopPause();
804 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
805 APICUpdatePendingInterrupts(pGVCpu);
806 ASMNopPause();
807 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
808 {
809 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
810 return VINF_EM_HALT;
811 }
812 ASMNopPause();
813 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
814 {
815 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
816 return VINF_EM_HALT;
817 }
818 ASMNopPause();
819 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
820 {
821 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
822 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
823 }
824 ASMNopPause();
825 }
826 }
827
828 /*
829 * We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
830 * knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here).
831 * After changing the state we must recheck the force flags of course.
832 */
833 if (VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
834 {
835 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
836 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
837 {
838 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
839 APICUpdatePendingInterrupts(pGVCpu);
840
841 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
842 {
843 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
844 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
845 }
846
847 /* Okay, block! */
848 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
849 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
850 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
851 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
852
853 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
854 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
855 if ( rc == VINF_SUCCESS
856 || rc == VERR_INTERRUPTED)
857 {
858 /* Keep some stats like ring-3 does. */
859 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
860 if (cNsOverslept > 50000)
861 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
862 else if (cNsOverslept < -50000)
863 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
864 else
865 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
866
867 /*
868 * Recheck whether we can resume execution or have to go to ring-3.
869 */
870 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
871 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
872 {
873 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
874 APICUpdatePendingInterrupts(pGVCpu);
875 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
876 {
877 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
878 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
879 }
880 }
881 }
882 }
883 else
884 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
885 }
886 }
887 }
888 }
889 }
890 return VINF_EM_HALT;
891}
892
893#endif
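/* A minimal sketch of the lost-wakeup hazard the (currently disabled)
 * vmmR0DoHalt code above guards against, i.e. the re-check that the r143567
 * fix added: a waker only notifies a CPU it observes in the HALTED state, so
 * any force flag raised before the state change produced no wakeup and must
 * be picked up by polling again afterwards.
 *
 *     if (VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
 *     {
 *         // Re-check the force flags AFTER the state change; anything set
 *         // before the cmpxchg saw a non-halted state and sent no wakeup.
 *         if (   VM_FF_IS_ANY_SET(pGVM, fVmFFs)
 *             || VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
 *             VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
 *         else
 *             GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime); // now safe to block
 *     }
 */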
894
895/**
896 * VMM ring-0 thread-context callback.
897 *
898 * This does common HM state updating and calls the HM-specific thread-context
899 * callback.
900 *
901 * @param enmEvent The thread-context event.
902 * @param pvUser Opaque pointer to the VMCPU.
903 *
904 * @thread EMT(pvUser)
905 */
906static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
907{
908 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
909
910 switch (enmEvent)
911 {
912 case RTTHREADCTXEVENT_IN:
913 {
914 /*
915 * Linux may call us with preemption enabled (really!) but technically we
916 * cannot get preempted here, otherwise we end up in an infinite recursion
917 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
918 * ad infinitum). Let's just disable preemption for now...
919 */
920 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
921 * preemption after doing the callout (one or two functions up the
922 * call chain). */
923 /** @todo r=ramshankar: See @bugref{5313#c30}. */
924 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
925 RTThreadPreemptDisable(&ParanoidPreemptState);
926
927 /* We need to update the VCPU <-> host CPU mapping. */
928 RTCPUID idHostCpu;
929 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
930 pVCpu->iHostCpuSet = iHostCpuSet;
931 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
932
933 /* In the very unlikely event that the GIP delta for the CPU we're
934 rescheduled onto needs calculating, try to force a return to ring-3.
935 We unfortunately cannot do the measurements right here. */
936 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
937 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
938
939 /* Invoke the HM-specific thread-context callback. */
940 HMR0ThreadCtxCallback(enmEvent, pvUser);
941
942 /* Restore preemption. */
943 RTThreadPreemptRestore(&ParanoidPreemptState);
944 break;
945 }
946
947 case RTTHREADCTXEVENT_OUT:
948 {
949 /* Invoke the HM-specific thread-context callback. */
950 HMR0ThreadCtxCallback(enmEvent, pvUser);
951
952 /*
953 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
954 * have the same host CPU associated with them.
955 */
956 pVCpu->iHostCpuSet = UINT32_MAX;
957 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
958 break;
959 }
960
961 default:
962 /* Invoke the HM-specific thread-context callback. */
963 HMR0ThreadCtxCallback(enmEvent, pvUser);
964 break;
965 }
966}
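/* The paranoid preemption bracket used for RTTHREADCTXEVENT_IN above, as a
 * stand-alone sketch: the host CPU identifiers are only stable while
 * preemption is disabled.
 *
 *     RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
 *     RTThreadPreemptDisable(&PreemptState);
 *     RTCPUID  idHostCpu;
 *     uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu); // stable until restore
 *     // ... use idHostCpu / iHostCpuSet ...
 *     RTThreadPreemptRestore(&PreemptState);
 */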
967
968
969/**
970 * Creates thread switching hook for the current EMT thread.
971 *
972 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
973 * platform does not implement switcher hooks, no hooks will be created and the
974 * member is set to NIL_RTTHREADCTXHOOK.
975 *
976 * @returns VBox status code.
977 * @param pVCpu The cross context virtual CPU structure.
978 * @thread EMT(pVCpu)
979 */
980VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
981{
982 VMCPU_ASSERT_EMT(pVCpu);
983 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
984
985#if 1 /* To disable this stuff change to zero. */
986 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
987 if (RT_SUCCESS(rc))
988 return rc;
989#else
990 RT_NOREF(vmmR0ThreadCtxCallback);
991 int rc = VERR_NOT_SUPPORTED;
992#endif
993
994 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
995 if (rc == VERR_NOT_SUPPORTED)
996 return VINF_SUCCESS;
997
998 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
999 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
1000}
1001
1002
1003/**
1004 * Destroys the thread switching hook for the specified VCPU.
1005 *
1006 * @param pVCpu The cross context virtual CPU structure.
1007 * @remarks Can be called from any thread.
1008 */
1009VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
1010{
1011 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
1012 AssertRC(rc);
1013 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1014}
1015
1016
1017/**
1018 * Disables the thread switching hook for this VCPU (if we got one).
1019 *
1020 * @param pVCpu The cross context virtual CPU structure.
1021 * @thread EMT(pVCpu)
1022 *
1023 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
1024 * this call. This means you have to be careful with what you do!
1025 */
1026VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1027{
1028 /*
1029 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1030 * @bugref{7726#c19} explains the need for this trick:
1031 *
1032 * VMXR0CallRing3Callback/SVMR0CallRing3Callback &
1033 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
1034 * longjmp & normal return to ring-3, which opens a window where we may be
1035 * rescheduled without changing VMCPUID::idHostCpu and cause confusion if
1036 * the CPU starts executing a different EMT. Both functions first disable
1037 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1038 * an opening for getting preempted.
1039 */
1040 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1041 * all the time. */
1042 /** @todo move this into the context hook disabling if(). */
1043 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1044
1045 /*
1046 * Disable the context hook, if we got one.
1047 */
1048 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1049 {
1050 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1051 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1052 AssertRC(rc);
1053 }
1054}
1055
1056
1057/**
1058 * Internal version of VMMR0ThreadCtxHookIsEnabled.
1059 *
1060 * @returns true if registered, false otherwise.
1061 * @param pVCpu The cross context virtual CPU structure.
1062 */
1063DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1064{
1065 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
1066}
1067
1068
1069/**
1070 * Whether thread-context hooks are registered for this VCPU.
1071 *
1072 * @returns true if registered, false otherwise.
1073 * @param pVCpu The cross context virtual CPU structure.
1074 */
1075VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1076{
1077 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1078}
1079
1080
1081/**
1082 * Returns the ring-0 release logger instance.
1083 *
1084 * @returns Pointer to release logger, NULL if not configured.
1085 * @param pVCpu The cross context virtual CPU structure of the caller.
1086 * @thread EMT(pVCpu)
1087 */
1088VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu)
1089{
1090 PVMMR0LOGGER pLogger = pVCpu->vmm.s.pR0RelLoggerR0;
1091 if (pLogger)
1092 return &pLogger->Logger;
1093 return NULL;
1094}
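/** @remarks A usage sketch; the message text is illustrative:
 * @code
 *     PRTLOGGER pRelLogger = VMMR0GetReleaseLogger(pVCpu);
 *     if (pRelLogger)
 *         RTLogLoggerEx(pRelLogger, 0, ~0U, "ring-0 release log line\n");
 * @endcode
 */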
1095
1096
1097#ifdef VBOX_WITH_STATISTICS
1098/**
1099 * Record return code statistics
1100 * @param pVM The cross context VM structure.
1101 * @param pVCpu The cross context virtual CPU structure.
1102 * @param rc The status code.
1103 */
1104static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1105{
1106 /*
1107 * Collect statistics.
1108 */
1109 switch (rc)
1110 {
1111 case VINF_SUCCESS:
1112 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1113 break;
1114 case VINF_EM_RAW_INTERRUPT:
1115 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1116 break;
1117 case VINF_EM_RAW_INTERRUPT_HYPER:
1118 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1119 break;
1120 case VINF_EM_RAW_GUEST_TRAP:
1121 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1122 break;
1123 case VINF_EM_RAW_RING_SWITCH:
1124 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1125 break;
1126 case VINF_EM_RAW_RING_SWITCH_INT:
1127 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1128 break;
1129 case VINF_EM_RAW_STALE_SELECTOR:
1130 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1131 break;
1132 case VINF_EM_RAW_IRET_TRAP:
1133 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1134 break;
1135 case VINF_IOM_R3_IOPORT_READ:
1136 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1137 break;
1138 case VINF_IOM_R3_IOPORT_WRITE:
1139 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1140 break;
1141 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1142 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1143 break;
1144 case VINF_IOM_R3_MMIO_READ:
1145 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1146 break;
1147 case VINF_IOM_R3_MMIO_WRITE:
1148 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1149 break;
1150 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1151 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1152 break;
1153 case VINF_IOM_R3_MMIO_READ_WRITE:
1154 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1155 break;
1156 case VINF_PATM_HC_MMIO_PATCH_READ:
1157 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1158 break;
1159 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1160 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1161 break;
1162 case VINF_CPUM_R3_MSR_READ:
1163 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1164 break;
1165 case VINF_CPUM_R3_MSR_WRITE:
1166 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1167 break;
1168 case VINF_EM_RAW_EMULATE_INSTR:
1169 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1170 break;
1171 case VINF_PATCH_EMULATE_INSTR:
1172 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1173 break;
1174 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1175 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1176 break;
1177 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1178 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1179 break;
1180 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1181 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1182 break;
1183 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1184 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1185 break;
1186 case VINF_CSAM_PENDING_ACTION:
1187 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1188 break;
1189 case VINF_PGM_SYNC_CR3:
1190 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1191 break;
1192 case VINF_PATM_PATCH_INT3:
1193 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1194 break;
1195 case VINF_PATM_PATCH_TRAP_PF:
1196 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1197 break;
1198 case VINF_PATM_PATCH_TRAP_GP:
1199 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1200 break;
1201 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1202 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1203 break;
1204 case VINF_EM_RESCHEDULE_REM:
1205 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1206 break;
1207 case VINF_EM_RAW_TO_R3:
1208 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1209 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1210 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1211 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1212 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1213 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1214 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1215 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1216 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1217 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1218 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1219 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1220 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1221 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1222 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1223 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1224 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1225 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1226 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1227 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1228 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1229 else
1230 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1231 break;
1232
1233 case VINF_EM_RAW_TIMER_PENDING:
1234 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1235 break;
1236 case VINF_EM_RAW_INTERRUPT_PENDING:
1237 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1238 break;
1239 case VINF_VMM_CALL_HOST:
1240 switch (pVCpu->vmm.s.enmCallRing3Operation)
1241 {
1242 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1243 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1244 break;
1245 case VMMCALLRING3_PDM_LOCK:
1246 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1247 break;
1248 case VMMCALLRING3_PGM_POOL_GROW:
1249 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1250 break;
1251 case VMMCALLRING3_PGM_LOCK:
1252 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1253 break;
1254 case VMMCALLRING3_PGM_MAP_CHUNK:
1255 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1256 break;
1257 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1258 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1259 break;
1260 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1261 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1262 break;
1263 case VMMCALLRING3_VM_SET_ERROR:
1264 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1265 break;
1266 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1267 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1268 break;
1269 case VMMCALLRING3_VM_R0_ASSERTION:
1270 default:
1271 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1272 break;
1273 }
1274 break;
1275 case VINF_PATM_DUPLICATE_FUNCTION:
1276 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1277 break;
1278 case VINF_PGM_CHANGE_MODE:
1279 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1280 break;
1281 case VINF_PGM_POOL_FLUSH_PENDING:
1282 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1283 break;
1284 case VINF_EM_PENDING_REQUEST:
1285 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1286 break;
1287 case VINF_EM_HM_PATCH_TPR_INSTR:
1288 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1289 break;
1290 default:
1291 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1292 break;
1293 }
1294}
1295#endif /* VBOX_WITH_STATISTICS */
1296
1297
1298/**
1299 * The Ring 0 entry point, called by the fast-ioctl path.
1300 *
1301 * @param pGVM The global (ring-0) VM structure.
1302 * @param pVMIgnored The cross context VM structure. The return code is
1303 * stored in pVM->vmm.s.iLastGZRc.
1304 * @param idCpu The Virtual CPU ID of the calling EMT.
1305 * @param enmOperation Which operation to execute.
1306 * @remarks Assume called with interrupts _enabled_.
1307 */
1308VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1309{
1310 RT_NOREF(pVMIgnored);
1311
1312 /*
1313 * Validation.
1314 */
1315 if ( idCpu < pGVM->cCpus
1316 && pGVM->cCpus == pGVM->cCpusUnsafe)
1317 { /*likely*/ }
1318 else
1319 {
1320 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1321 return;
1322 }
1323
1324 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1325 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1326 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1327 && pGVCpu->hNativeThreadR0 == hNativeThread))
1328 { /* likely */ }
1329 else
1330 {
1331 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1332 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1333 return;
1334 }
1335
1336 /*
1337 * SMAP fun.
1338 */
1339 VMM_CHECK_SMAP_SETUP();
1340 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1341
1342 /*
1343 * Perform requested operation.
1344 */
1345 switch (enmOperation)
1346 {
1347 /*
1348 * Run guest code using the available hardware acceleration technology.
1349 */
1350 case VMMR0_DO_HM_RUN:
1351 {
1352 for (;;) /* hlt loop */
1353 {
1354 /*
1355 * Disable preemption.
1356 */
1357 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1358 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1359 RTThreadPreemptDisable(&PreemptState);
1360
1361 /*
1362 * Get the host CPU identifiers, make sure they are valid and that
1363 * we've got a TSC delta for the CPU.
1364 */
1365 RTCPUID idHostCpu;
1366 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1367 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1368 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1369 {
1370 pGVCpu->iHostCpuSet = iHostCpuSet;
1371 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1372
1373 /*
1374 * Update the periodic preemption timer if it's active.
1375 */
1376 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1377 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1378 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1379
1380#ifdef VMM_R0_TOUCH_FPU
1381 /*
1382 * Make sure we've got the FPU state loaded so we don't need to clear
1383 * CR0.TS and get out of sync with the host kernel when loading the guest
1384 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1385 */
1386 CPUMR0TouchHostFpu();
1387#endif
1388 int rc;
1389 bool fPreemptRestored = false;
1390 if (!HMR0SuspendPending())
1391 {
1392 /*
1393 * Enable the context switching hook.
1394 */
1395 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1396 {
1397 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmm.s.hCtxHook));
1398 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1399 }
1400
1401 /*
1402 * Enter HM context.
1403 */
1404 rc = HMR0Enter(pGVCpu);
1405 if (RT_SUCCESS(rc))
1406 {
1407 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1408
1409 /*
1410 * When preemption hooks are in place, enable preemption now that
1411 * we're in HM context.
1412 */
1413 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1414 {
1415 fPreemptRestored = true;
1416 RTThreadPreemptRestore(&PreemptState);
1417 }
1418
1419 /*
1420 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1421 */
1422 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1423 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
1424 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1425
1426 /*
1427 * Assert sanity on the way out. Using manual assertions code here as normal
1428 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1429 */
1430 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1431 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1432 {
1433 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1434 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1435 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1436 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1437 }
1438 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1439 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1440 {
1441 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1442 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1443 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1444 rc = VERR_INVALID_STATE;
1445 }
1446
1447 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1448 }
1449 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1450
1451 /*
1452 * Invalidate the host CPU identifiers before we disable the context
1453 * hook / restore preemption.
1454 */
1455 pGVCpu->iHostCpuSet = UINT32_MAX;
1456 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1457
1458 /*
1459 * Disable context hooks. Due to unresolved cleanup issues, we
1460 * cannot leave the hooks enabled when we return to ring-3.
1461 *
1462 * Note! At the moment HM may also have disabled the hook
1463 * when we get here, but the IPRT API handles that.
1464 */
1465 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1466 {
1467 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1468 RTThreadCtxHookDisable(pGVCpu->vmm.s.hCtxHook);
1469 }
1470 }
1471 /*
1472 * The system is about to go into suspend mode; go back to ring 3.
1473 */
1474 else
1475 {
1476 rc = VINF_EM_RAW_INTERRUPT;
1477 pGVCpu->iHostCpuSet = UINT32_MAX;
1478 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1479 }
1480
1481 /** @todo When HM stops messing with the context hook state, we'll disable
1482 * preemption again before the RTThreadCtxHookDisable call. */
1483 if (!fPreemptRestored)
1484 RTThreadPreemptRestore(&PreemptState);
1485
1486 pGVCpu->vmm.s.iLastGZRc = rc;
1487
1488 /* Fire dtrace probe and collect statistics. */
1489 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1490#ifdef VBOX_WITH_STATISTICS
1491 vmmR0RecordRC(pGVM, pGVCpu, rc);
1492#endif
1493#if 0 /** @todo broken see oem2 ticket 40 */
1494 /*
1495 * If this is a halt.
1496 */
1497 if (rc != VINF_EM_HALT)
1498 { /* we're not in a hurry for a HLT, so prefer this path */ }
1499 else
1500 {
1501 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1502 if (rc == VINF_SUCCESS)
1503 {
1504 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1505 continue;
1506 }
1507 pGVCpu->vmm.s.cR0HaltsToRing3++;
1508 }
1509#endif
1510 }
1511 /*
1512 * Invalid CPU set index or TSC delta in need of measuring.
1513 */
1514 else
1515 {
1516 pGVCpu->iHostCpuSet = UINT32_MAX;
1517 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1518 RTThreadPreemptRestore(&PreemptState);
1519 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1520 {
1521 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1522 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1523 0 /*default cTries*/);
1524 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1525 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1526 else
1527 pGVCpu->vmm.s.iLastGZRc = rc;
1528 }
1529 else
1530 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1531 }
1532 break;
1533
1534 } /* halt loop. */
1535 break;
1536 }
1537
1538#ifdef VBOX_WITH_NEM_R0
1539# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1540 case VMMR0_DO_NEM_RUN:
1541 {
1542 /*
1543 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1544 */
1545 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1546# ifdef VBOXSTRICTRC_STRICT_ENABLED
1547 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1548# else
1549 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1550# endif
1551 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1552 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1553
1554 pGVCpu->vmm.s.iLastGZRc = rc;
1555
1556 /*
1557 * Fire dtrace probe and collect statistics.
1558 */
1559 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1560# ifdef VBOX_WITH_STATISTICS
1561 vmmR0RecordRC(pGVM, pGVCpu, rc);
1562# endif
1563 break;
1564 }
1565# endif
1566#endif
1567
1568 /*
1569 * For profiling.
1570 */
1571 case VMMR0_DO_NOP:
1572 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1573 break;
1574
1575 /*
1576 * Shouldn't happen.
1577 */
1578 default:
1579 AssertMsgFailed(("%#x\n", enmOperation));
1580 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1581 break;
1582 }
1583 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1584}
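/* How ring-3 reaches VMMR0EntryFast, sketched on the assumption that the EMT
 * uses the support library's fast ioctl path (see sup.h); as documented above,
 * the real status code is stashed in pVM->vmm.s.iLastGZRc:
 *
 *     int rc = SUPR3CallVMMR0Fast(pVMR0, VMMR0_DO_HM_RUN, idCpu);
 *     if (RT_SUCCESS(rc))
 *         rc = pVM->vmm.s.iLastGZRc;
 *     // ... dispatch on rc (VINF_EM_HALT, VINF_EM_RAW_TO_R3, ...) ...
 */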
1585
1586
1587/**
1588 * Validates a session or VM session argument.
1589 *
1590 * @returns true / false accordingly.
1591 * @param pGVM The global (ring-0) VM structure.
1592 * @param pClaimedSession The session claim to validate.
1593 * @param pSession The session argument.
1594 */
1595DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1596{
1597 /* This must be set! */
1598 if (!pSession)
1599 return false;
1600
1601 /* Only one out of the two. */
1602 if (pGVM && pClaimedSession)
1603 return false;
1604 if (pGVM)
1605 pClaimedSession = pGVM->pSession;
1606 return pClaimedSession == pSession;
1607}
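/* A sketch of how a request handler might use vmmR0IsValidSession; the request
 * type and its pSession field are hypothetical:
 *
 *     if (!vmmR0IsValidSession(pGVM, pReq->pSession, pSession))
 *         return VERR_PERMISSION_DENIED;
 */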
1608
1609
1610/**
1611 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1612 * called thru a longjmp so we can exit safely on failure.
1613 *
1614 * @returns VBox status code.
1615 * @param pGVM The global (ring-0) VM structure.
1616 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1617 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1618 * @param enmOperation Which operation to execute.
1619 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1620 * The support driver validates this if it's present.
1621 * @param u64Arg Some simple constant argument.
1622 * @param pSession The session of the caller.
1623 *
1624 * @remarks Assume called with interrupts _enabled_.
1625 */
1626static int vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1627 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1628{
1629 /*
1630 * Validate pGVM and idCpu for consistency and validity.
1631 */
1632 if (pGVM != NULL)
1633 {
1634 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
1635 { /* likely */ }
1636 else
1637 {
1638 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1639 return VERR_INVALID_POINTER;
1640 }
1641
1642 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1643 { /* likely */ }
1644 else
1645 {
1646 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1647 return VERR_INVALID_PARAMETER;
1648 }
1649
1650 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1651 && pGVM->enmVMState <= VMSTATE_TERMINATED
1652 && pGVM->pSession == pSession
1653 && pGVM->pSelf == pGVM))
1654 { /* likely */ }
1655 else
1656 {
1657 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1658 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1659 return VERR_INVALID_POINTER;
1660 }
1661 }
1662 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1663 { /* likely */ }
1664 else
1665 {
1666 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1667 return VERR_INVALID_PARAMETER;
1668 }
1669
1670 /*
1671 * SMAP fun.
1672 */
1673 VMM_CHECK_SMAP_SETUP();
1674 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1675
1676 /*
1677 * Process the request.
1678 */
1679 int rc;
1680 switch (enmOperation)
1681 {
1682 /*
1683 * GVM requests
1684 */
1685 case VMMR0_DO_GVMM_CREATE_VM:
1686 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1687 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1688 else
1689 rc = VERR_INVALID_PARAMETER;
1690 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1691 break;
1692
1693 case VMMR0_DO_GVMM_DESTROY_VM:
1694 if (pReqHdr == NULL && u64Arg == 0)
1695 rc = GVMMR0DestroyVM(pGVM);
1696 else
1697 rc = VERR_INVALID_PARAMETER;
1698 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1699 break;
1700
1701 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1702 if (pGVM != NULL)
1703 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1704 else
1705 rc = VERR_INVALID_PARAMETER;
1706 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1707 break;
1708
1709 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1710 if (pGVM != NULL)
1711 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1712 else
1713 rc = VERR_INVALID_PARAMETER;
1714 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1715 break;
1716
1717 case VMMR0_DO_GVMM_SCHED_HALT:
1718 if (pReqHdr)
1719 return VERR_INVALID_PARAMETER;
1720 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1721 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1722 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1723 break;
1724
1725 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1726 if (pReqHdr || u64Arg)
1727 return VERR_INVALID_PARAMETER;
1728 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1729 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1730 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1731 break;
1732
1733 case VMMR0_DO_GVMM_SCHED_POKE:
1734 if (pReqHdr || u64Arg)
1735 return VERR_INVALID_PARAMETER;
1736 rc = GVMMR0SchedPoke(pGVM, idCpu);
1737 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1738 break;
1739
1740 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1741 if (u64Arg)
1742 return VERR_INVALID_PARAMETER;
1743 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1744 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1745 break;
1746
1747 case VMMR0_DO_GVMM_SCHED_POLL:
1748 if (pReqHdr || u64Arg > 1)
1749 return VERR_INVALID_PARAMETER;
1750 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1751 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1752 break;
1753
1754 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1755 if (u64Arg)
1756 return VERR_INVALID_PARAMETER;
1757 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1758 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1759 break;
1760
1761 case VMMR0_DO_GVMM_RESET_STATISTICS:
1762 if (u64Arg)
1763 return VERR_INVALID_PARAMETER;
1764 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1765 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1766 break;
1767
1768 /*
1769 * Initialize the R0 part of a VM instance.
1770 */
1771 case VMMR0_DO_VMMR0_INIT:
1772 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1773 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1774 break;
1775
1776 /*
1777 * Does EMT-specific ring-0 init.
1778 */
1779 case VMMR0_DO_VMMR0_INIT_EMT:
1780 rc = vmmR0InitVMEmt(pGVM, idCpu);
1781 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1782 break;
1783
1784 /*
1785 * Terminate the R0 part of a VM instance.
1786 */
1787 case VMMR0_DO_VMMR0_TERM:
1788 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1789 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1790 break;
1791
1792 /*
1793 * Attempt to enable HM mode and check the current setting.
1794 */
1795 case VMMR0_DO_HM_ENABLE:
1796 rc = HMR0EnableAllCpus(pGVM);
1797 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1798 break;
1799
1800 /*
1801 * Set up the hardware-accelerated session.
1802 */
1803 case VMMR0_DO_HM_SETUP_VM:
1804 rc = HMR0SetupVM(pGVM);
1805 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1806 break;
1807
1808 /*
1809 * PGM wrappers.
1810 */
1811 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1812 if (idCpu == NIL_VMCPUID)
1813 return VERR_INVALID_CPU_ID;
1814 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
1815 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1816 break;
1817
1818 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1819 if (idCpu == NIL_VMCPUID)
1820 return VERR_INVALID_CPU_ID;
1821 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
1822 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1823 break;
1824
1825 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1826 if (idCpu == NIL_VMCPUID)
1827 return VERR_INVALID_CPU_ID;
1828 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, idCpu);
1829 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1830 break;
1831
1832 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1833 if (idCpu != 0)
1834 return VERR_INVALID_CPU_ID;
1835 rc = PGMR0PhysSetupIoMmu(pGVM);
1836 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1837 break;
1838
1839 case VMMR0_DO_PGM_POOL_GROW:
1840 if (idCpu == NIL_VMCPUID)
1841 return VERR_INVALID_CPU_ID;
1842 rc = PGMR0PoolGrow(pGVM);
1843 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1844 break;
1845
1846 /*
1847 * GMM wrappers.
1848 */
1849 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1850 if (u64Arg)
1851 return VERR_INVALID_PARAMETER;
1852 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1853 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1854 break;
1855
1856 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1857 if (u64Arg)
1858 return VERR_INVALID_PARAMETER;
1859 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1860 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1861 break;
1862
1863 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1864 if (u64Arg)
1865 return VERR_INVALID_PARAMETER;
1866 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1867 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1868 break;
1869
1870 case VMMR0_DO_GMM_FREE_PAGES:
1871 if (u64Arg)
1872 return VERR_INVALID_PARAMETER;
1873 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1874 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1875 break;
1876
1877 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1878 if (u64Arg)
1879 return VERR_INVALID_PARAMETER;
1880 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1881 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1882 break;
1883
1884 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1885 if (u64Arg)
1886 return VERR_INVALID_PARAMETER;
1887 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1888 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1889 break;
1890
1891 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1892 if (idCpu == NIL_VMCPUID)
1893 return VERR_INVALID_CPU_ID;
1894 if (u64Arg)
1895 return VERR_INVALID_PARAMETER;
1896 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1897 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1898 break;
1899
1900 case VMMR0_DO_GMM_BALLOONED_PAGES:
1901 if (u64Arg)
1902 return VERR_INVALID_PARAMETER;
1903 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1904 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1905 break;
1906
1907 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1908 if (u64Arg)
1909 return VERR_INVALID_PARAMETER;
1910 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1911 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1912 break;
1913
1914 case VMMR0_DO_GMM_SEED_CHUNK:
1915 if (pReqHdr)
1916 return VERR_INVALID_PARAMETER;
1917 rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
1918 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1919 break;
1920
1921 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1922 if (idCpu == NIL_VMCPUID)
1923 return VERR_INVALID_CPU_ID;
1924 if (u64Arg)
1925 return VERR_INVALID_PARAMETER;
1926 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1927 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1928 break;
1929
1930 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1931 if (idCpu == NIL_VMCPUID)
1932 return VERR_INVALID_CPU_ID;
1933 if (u64Arg)
1934 return VERR_INVALID_PARAMETER;
1935 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1936 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1937 break;
1938
1939 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1940 if (idCpu == NIL_VMCPUID)
1941 return VERR_INVALID_CPU_ID;
1942 if ( u64Arg
1943 || pReqHdr)
1944 return VERR_INVALID_PARAMETER;
1945 rc = GMMR0ResetSharedModules(pGVM, idCpu);
1946 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1947 break;
1948
1949#ifdef VBOX_WITH_PAGE_SHARING
1950 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1951 {
1952 if (idCpu == NIL_VMCPUID)
1953 return VERR_INVALID_CPU_ID;
1954 if ( u64Arg
1955 || pReqHdr)
1956 return VERR_INVALID_PARAMETER;
1957 rc = GMMR0CheckSharedModules(pGVM, idCpu);
1958 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1959 break;
1960 }
1961#endif
1962
1963#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1964 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1965 if (u64Arg)
1966 return VERR_INVALID_PARAMETER;
1967 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1968 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1969 break;
1970#endif
1971
1972 case VMMR0_DO_GMM_QUERY_STATISTICS:
1973 if (u64Arg)
1974 return VERR_INVALID_PARAMETER;
1975 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1976 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1977 break;
1978
1979 case VMMR0_DO_GMM_RESET_STATISTICS:
1980 if (u64Arg)
1981 return VERR_INVALID_PARAMETER;
1982 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1983 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1984 break;
1985
1986 /*
1987 * A quick GCFGM mock-up.
1988 */
1989 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1990 case VMMR0_DO_GCFGM_SET_VALUE:
1991 case VMMR0_DO_GCFGM_QUERY_VALUE:
1992 {
1993 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1994 return VERR_INVALID_PARAMETER;
1995 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1996 if (pReq->Hdr.cbReq != sizeof(*pReq))
1997 return VERR_INVALID_PARAMETER;
1998 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1999 {
2000 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2001 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2002 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
2003 }
2004 else
2005 {
2006 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2007 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2008 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
2009 }
2010 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2011 break;
2012 }
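/* A minimal ring-3 sketch of driving this GCFGM mock-up. The GCFGMVALUEREQ
 * fields used (Hdr.cbReq, pSession, szName, u64Value) are exactly the ones
 * the handler above dereferences; the SUPVMMR0REQHDR_MAGIC initialisation,
 * the SUPR3CallVMMR0Ex entry point and the "/GVMM/MinSleepAlone" key are
 * assumptions about the support library and GVMM config space, not
 * something this file defines:
 *
 *     GCFGMVALUEREQ Req;
 *     RT_ZERO(Req);
 *     Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;    // assumed header magic
 *     Req.Hdr.cbReq    = sizeof(Req);             // checked by the handler
 *     Req.pSession     = pSession;                // the caller's session
 *     RTStrCopy(Req.szName, sizeof(Req.szName), "/GVMM/MinSleepAlone");
 *     int rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID,
 *                               VMMR0_DO_GCFGM_QUERY_VALUE, 0, &Req.Hdr);
 *     if (RT_SUCCESS(rc))
 *         RTPrintf("value=%RU64\n", Req.u64Value);
 */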
2013
2014 /*
2015 * PDM Wrappers.
2016 */
2017 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
2018 {
2019 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2020 return VERR_INVALID_PARAMETER;
2021 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
2022 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2023 break;
2024 }
2025
2026 case VMMR0_DO_PDM_DEVICE_CREATE:
2027 {
2028 if (!pReqHdr || u64Arg || idCpu != 0)
2029 return VERR_INVALID_PARAMETER;
2030 rc = PDMR0DeviceCreateReqHandler(pGVM, (PPDMDEVICECREATEREQ)pReqHdr);
2031 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2032 break;
2033 }
2034
2035 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2036 {
2037 if (!pReqHdr || u64Arg)
2038 return VERR_INVALID_PARAMETER;
2039 rc = PDMR0DeviceGenCallReqHandler(pGVM, (PPDMDEVICEGENCALLREQ)pReqHdr, idCpu);
2040 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2041 break;
2042 }
2043
2044 /** @todo Remove this once all devices have been converted to the new style! @bugref{9218} */
2045 case VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT:
2046 {
2047 if (!pReqHdr || u64Arg || idCpu != 0)
2048 return VERR_INVALID_PARAMETER;
2049 rc = PDMR0DeviceCompatSetCritSectReqHandler(pGVM, (PPDMDEVICECOMPATSETCRITSECTREQ)pReqHdr);
2050 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2051 break;
2052 }
2053
2054 /*
2055 * Requests to the internal networking service.
2056 */
2057 case VMMR0_DO_INTNET_OPEN:
2058 {
2059 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2060 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2061 return VERR_INVALID_PARAMETER;
2062 rc = IntNetR0OpenReq(pSession, pReq);
2063 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2064 break;
2065 }
2066
2067 case VMMR0_DO_INTNET_IF_CLOSE:
2068 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2069 return VERR_INVALID_PARAMETER;
2070 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2071 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2072 break;
2073
2074
2075 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2076 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2077 return VERR_INVALID_PARAMETER;
2078 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2079 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2080 break;
2081
2082 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2083 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2084 return VERR_INVALID_PARAMETER;
2085 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2086 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2087 break;
2088
2089 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2090 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2091 return VERR_INVALID_PARAMETER;
2092 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2093 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2094 break;
2095
2096 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2097 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2098 return VERR_INVALID_PARAMETER;
2099 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2100 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2101 break;
2102
2103 case VMMR0_DO_INTNET_IF_SEND:
2104 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2105 return VERR_INVALID_PARAMETER;
2106 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2107 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2108 break;
2109
2110 case VMMR0_DO_INTNET_IF_WAIT:
2111 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2112 return VERR_INVALID_PARAMETER;
2113 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2114 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2115 break;
2116
2117 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2118 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2119 return VERR_INVALID_PARAMETER;
2120 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2121 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2122 break;
2123
2124#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
2125 /*
2126 * Requests to host PCI driver service.
2127 */
2128 case VMMR0_DO_PCIRAW_REQ:
2129 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2130 return VERR_INVALID_PARAMETER;
2131 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2132 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2133 break;
2134#endif
2135
2136 /*
2137 * NEM requests.
2138 */
2139#ifdef VBOX_WITH_NEM_R0
2140# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2141 case VMMR0_DO_NEM_INIT_VM:
2142 if (u64Arg || pReqHdr || idCpu != 0)
2143 return VERR_INVALID_PARAMETER;
2144 rc = NEMR0InitVM(pGVM);
2145 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2146 break;
2147
2148 case VMMR0_DO_NEM_INIT_VM_PART_2:
2149 if (u64Arg || pReqHdr || idCpu != 0)
2150 return VERR_INVALID_PARAMETER;
2151 rc = NEMR0InitVMPart2(pGVM);
2152 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2153 break;
2154
2155 case VMMR0_DO_NEM_MAP_PAGES:
2156 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2157 return VERR_INVALID_PARAMETER;
2158 rc = NEMR0MapPages(pGVM, idCpu);
2159 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2160 break;
2161
2162 case VMMR0_DO_NEM_UNMAP_PAGES:
2163 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2164 return VERR_INVALID_PARAMETER;
2165 rc = NEMR0UnmapPages(pGVM, idCpu);
2166 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2167 break;
2168
2169 case VMMR0_DO_NEM_EXPORT_STATE:
2170 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2171 return VERR_INVALID_PARAMETER;
2172 rc = NEMR0ExportState(pGVM, idCpu);
2173 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2174 break;
2175
2176 case VMMR0_DO_NEM_IMPORT_STATE:
2177 if (pReqHdr || idCpu == NIL_VMCPUID)
2178 return VERR_INVALID_PARAMETER;
2179 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2180 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2181 break;
2182
2183 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2184 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2185 return VERR_INVALID_PARAMETER;
2186 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2187 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2188 break;
2189
2190 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2191 if (pReqHdr || idCpu == NIL_VMCPUID)
2192 return VERR_INVALID_PARAMETER;
2193 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2194 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2195 break;
2196
2197 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2198 if (u64Arg || pReqHdr)
2199 return VERR_INVALID_PARAMETER;
2200 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2201 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2202 break;
2203
2204# if 1 && defined(DEBUG_bird)
2205 case VMMR0_DO_NEM_EXPERIMENT:
2206 if (pReqHdr)
2207 return VERR_INVALID_PARAMETER;
2208 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2209 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2210 break;
2211# endif
2212# endif
2213#endif
2214
2215 /*
2216 * IOM requests.
2217 */
2218 case VMMR0_DO_IOM_GROW_IO_PORTS:
2219 {
2220 if (pReqHdr || idCpu != 0)
2221 return VERR_INVALID_PARAMETER;
2222 rc = IOMR0IoPortGrowRegistrationTables(pGVM, u64Arg);
2223 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2224 break;
2225 }
2226
2227 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2228 {
2229 if (pReqHdr || idCpu != 0)
2230 return VERR_INVALID_PARAMETER;
2231 rc = IOMR0IoPortGrowStatisticsTable(pGVM, u64Arg);
2232 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2233 break;
2234 }
2235
2236 case VMMR0_DO_IOM_GROW_MMIO_REGS:
2237 {
2238 if (pReqHdr || idCpu != 0)
2239 return VERR_INVALID_PARAMETER;
2240 rc = IOMR0MmioGrowRegistrationTables(pGVM, u64Arg);
2241 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2242 break;
2243 }
2244
2245 case VMMR0_DO_IOM_GROW_MMIO_STATS:
2246 {
2247 if (pReqHdr || idCpu != 0)
2248 return VERR_INVALID_PARAMETER;
2249 rc = IOMR0MmioGrowStatisticsTable(pGVM, u64Arg);
2250 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2251 break;
2252 }
2253
2254 case VMMR0_DO_IOM_SYNC_STATS_INDICES:
2255 {
2256 if (pReqHdr || idCpu != 0)
2257 return VERR_INVALID_PARAMETER;
2258 rc = IOMR0IoPortSyncStatisticsIndices(pGVM);
2259 if (RT_SUCCESS(rc))
2260 rc = IOMR0MmioSyncStatisticsIndices(pGVM);
2261 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2262 break;
2263 }
2264
2265 /*
2266 * DBGF requests.
2267 */
2268#ifdef VBOX_WITH_DBGF_TRACING
2269 case VMMR0_DO_DBGF_TRACER_CREATE:
2270 {
2271 if (!pReqHdr || u64Arg || idCpu != 0)
2272 return VERR_INVALID_PARAMETER;
2273 rc = DBGFR0TracerCreateReqHandler(pGVM, (PDBGFTRACERCREATEREQ)pReqHdr);
2274 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2275 break;
2276 }
2277
2278 case VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER:
2279 {
2280 if (!pReqHdr || u64Arg)
2281 return VERR_INVALID_PARAMETER;
2282# if 0 /** @todo */
2283 rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
2284# else
2285 rc = VERR_NOT_IMPLEMENTED;
2286# endif
2287 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2288 break;
2289 }
2290#endif
2291
2292 case VMMR0_DO_DBGF_BP_INIT:
2293 {
2294 if (!pReqHdr || u64Arg || idCpu != 0)
2295 return VERR_INVALID_PARAMETER;
2296 rc = DBGFR0BpInitReqHandler(pGVM, (PDBGFBPINITREQ)pReqHdr);
2297 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2298 break;
2299 }
2300
2301 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2302 {
2303 if (!pReqHdr || u64Arg || idCpu != 0)
2304 return VERR_INVALID_PARAMETER;
2305 rc = DBGFR0BpChunkAllocReqHandler(pGVM, (PDBGFBPCHUNKALLOCREQ)pReqHdr);
2306 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2307 break;
2308 }
2309
2310 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2311 {
2312 if (!pReqHdr || u64Arg || idCpu != 0)
2313 return VERR_INVALID_PARAMETER;
2314 rc = DBGFR0BpL2TblChunkAllocReqHandler(pGVM, (PDBGFBPL2TBLCHUNKALLOCREQ)pReqHdr);
2315 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2316 break;
2317 }
2318
2319 case VMMR0_DO_DBGF_BP_OWNER_INIT:
2320 {
2321 if (!pReqHdr || u64Arg || idCpu != 0)
2322 return VERR_INVALID_PARAMETER;
2323 rc = DBGFR0BpOwnerInitReqHandler(pGVM, (PDBGFBPOWNERINITREQ)pReqHdr);
2324 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2325 break;
2326 }
2327
2328 /*
2329 * TM requests.
2330 */
2331 case VMMR0_DO_TM_GROW_TIMER_QUEUE:
2332 {
2333 if (pReqHdr || idCpu == NIL_VMCPUID)
2334 return VERR_INVALID_PARAMETER;
2335 rc = TMR0TimerQueueGrow(pGVM, RT_HI_U32(u64Arg), RT_LO_U32(u64Arg));
2336 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2337 break;
2338 }
2339
2340 /*
2341 * For profiling.
2342 */
2343 case VMMR0_DO_NOP:
2344 case VMMR0_DO_SLOW_NOP:
2345 return VINF_SUCCESS;
2346
2347 /*
2348 * For testing Ring-0 APIs invoked in this environment.
2349 */
2350 case VMMR0_DO_TESTS:
2351 /** @todo make new test */
2352 return VINF_SUCCESS;
2353
2354 default:
2355 /*
2356 * We're returning VERR_NOT_SUPPORTED here so we've got something other
2357 * than -1, which the interrupt gate glue code might return.
2358 */
2359 Log(("operation %#x is not supported\n", enmOperation));
2360 return VERR_NOT_SUPPORTED;
2361 }
2362 return rc;
2363}
2364
2365
2366/**
2367 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2368 */
2369typedef struct VMMR0ENTRYEXARGS
2370{
2371 PGVM pGVM;
2372 VMCPUID idCpu;
2373 VMMR0OPERATION enmOperation;
2374 PSUPVMMR0REQHDR pReq;
2375 uint64_t u64Arg;
2376 PSUPDRVSESSION pSession;
2377} VMMR0ENTRYEXARGS;
2378/** Pointer to a vmmR0EntryExWrapper argument package. */
2379typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
2380
2381/**
2382 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2383 *
2384 * @returns VBox status code.
2385 * @param pvArgs The argument package.
2386 */
2387static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2388{
2389 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2390 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2391 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2392 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2393 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2394 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2395}
2396
2397
2398/**
2399 * The Ring 0 entry point, called by the support library (SUP).
2400 *
2401 * @returns VBox status code.
2402 * @param pGVM The global (ring-0) VM structure.
2403 * @param pVM The cross context VM structure.
2404 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2405 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2406 * @param enmOperation Which operation to execute.
2407 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2408 * @param u64Arg Some simple constant argument.
2409 * @param pSession The session of the caller.
2410 * @remarks Assume called with interrupts _enabled_.
2411 */
2412VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2413 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2414{
2415 /*
2416 * Requests that should only happen on the EMT thread will be
2417 * wrapped in a setjmp so we can assert without causing trouble.
2418 */
2419 if ( pVM != NULL
2420 && pGVM != NULL
2421 && pVM == pGVM /** @todo drop pVM or pGVM */
2422 && idCpu < pGVM->cCpus
2423 && pGVM->pSession == pSession
2424 && pGVM->pSelf == pVM)
2425 {
2426 switch (enmOperation)
2427 {
2428 /* These might/will be called before VMMR3Init. */
2429 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2430 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2431 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2432 case VMMR0_DO_GMM_FREE_PAGES:
2433 case VMMR0_DO_GMM_BALLOONED_PAGES:
2434 /* On the Mac we might not have a valid jmp buf, so check these as well. */
2435 case VMMR0_DO_VMMR0_INIT:
2436 case VMMR0_DO_VMMR0_TERM:
2437
2438 case VMMR0_DO_PDM_DEVICE_CREATE:
2439 case VMMR0_DO_PDM_DEVICE_GEN_CALL:
2440 case VMMR0_DO_IOM_GROW_IO_PORTS:
2441 case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
2442 case VMMR0_DO_DBGF_BP_INIT:
2443 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
2444 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
2445 {
2446 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2447 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2448 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2449 && pGVCpu->hNativeThreadR0 == hNativeThread))
2450 {
2451 if (!pGVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2452 break;
2453
2454 /** @todo validate this EMT claim... GVM knows. */
2455 VMMR0ENTRYEXARGS Args;
2456 Args.pGVM = pGVM;
2457 Args.idCpu = idCpu;
2458 Args.enmOperation = enmOperation;
2459 Args.pReq = pReq;
2460 Args.u64Arg = u64Arg;
2461 Args.pSession = pSession;
2462 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2463 }
2464 return VERR_VM_THREAD_NOT_EMT;
2465 }
2466
2467 default:
2468 case VMMR0_DO_PGM_POOL_GROW:
2469 break;
2470 }
2471 }
2472 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2473}
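/* A hedged sketch of a call into this entry point for a scheduler halt; the
 * expiry-time meaning of u64Arg is inferred from GVMMR0SchedHaltReq's third
 * parameter, and the worker above requires pReq to be NULL for this
 * operation. Illustration only, not an exact support-driver call site:
 *
 *     int rc = VMMR0EntryEx(pGVM, pGVM, idCpu,          // pVM == pGVM
 *                           VMMR0_DO_GVMM_SCHED_HALT,
 *                           NULL, u64ExpireGipTime,     // no pReq; u64Arg = expiry
 *                           pSession);
 */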
2474
2475
2476/**
2477 * Checks whether we've armed the ring-0 long jump machinery.
2478 *
2479 * @returns @c true / @c false
2480 * @param pVCpu The cross context virtual CPU structure.
2481 * @thread EMT
2482 * @sa VMMIsLongJumpArmed
2483 */
2484VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2485{
2486#ifdef RT_ARCH_X86
2487 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2488 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2489#else
2490 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2491 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2492#endif
2493}
2494
2495
2496/**
2497 * Checks whether we've done a ring-3 long jump.
2498 *
2499 * @returns @c true / @c false
2500 * @param pVCpu The cross context virtual CPU structure.
2501 * @thread EMT
2502 */
2503VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2504{
2505 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2506}
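/* Sketched use of the two predicates above, assuming only what this file
 * shows: code about to use VMMRZCallRing3 can assert that the jump buffer
 * is armed, and code resuming afterwards can test the fInRing3Call state:
 *
 *     Assert(VMMR0IsLongJumpArmed(pVCpu));     // buffer set up, not mid-call
 *     rc = VMMRZCallRing3(pVM, pVCpu, enmOperation, uArg);
 *     if (VMMR0IsInRing3LongJump(pVCpu))
 *         return rc;                           // ring-3 excursion still pending
 */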
2507
2508
2509/**
2510 * Internal R0 logger worker: Flush logger.
2511 *
2512 * @param pLogger The logger instance to flush.
2513 * @remark This function must be exported!
2514 */
2515VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2516{
2517#ifdef LOG_ENABLED
2518 /*
2519 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2520 * (This code is deliberately paranoid.)
2521 */
2522 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_UOFFSETOF(VMMR0LOGGER, Logger));
2523 if ( !VALID_PTR(pR0Logger)
2524 || !VALID_PTR(pR0Logger + 1)
2525 || pLogger->u32Magic != RTLOGGER_MAGIC)
2526 {
2527# ifdef DEBUG
2528 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2529# endif
2530 return;
2531 }
2532 if (pR0Logger->fFlushingDisabled)
2533 return; /* quietly */
2534
2535 PVMCC pVM = pR0Logger->pVM;
2536 if ( !VALID_PTR(pVM)
2537 || pVM->pSelf != pVM)
2538 {
2539# ifdef DEBUG
2540 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pSelf=%p! pLogger=%p\n", pVM, pVM->pSelf, pLogger);
2541# endif
2542 return;
2543 }
2544
2545 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2546 if (pVCpu)
2547 {
2548 /*
2549 * Check that the jump buffer is armed.
2550 */
2551# ifdef RT_ARCH_X86
2552 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2553 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2554# else
2555 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2556 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2557# endif
2558 {
2559# ifdef DEBUG
2560 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2561# endif
2562 return;
2563 }
2564 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2565 }
2566# ifdef DEBUG
2567 else
2568 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2569# endif
2570#else
2571 NOREF(pLogger);
2572#endif /* LOG_ENABLED */
2573}
2574
2575#ifdef LOG_ENABLED
2576
2577/**
2578 * Disables flushing of the ring-0 debug log.
2579 *
2580 * @param pVCpu The cross context virtual CPU structure.
2581 */
2582VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPUCC pVCpu)
2583{
2584 if (pVCpu->vmm.s.pR0LoggerR0)
2585 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2586 if (pVCpu->vmm.s.pR0RelLoggerR0)
2587 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = true;
2588}
2589
2590
2591/**
2592 * Enables flushing of the ring-0 debug log.
2593 *
2594 * @param pVCpu The cross context virtual CPU structure.
2595 */
2596VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPUCC pVCpu)
2597{
2598 if (pVCpu->vmm.s.pR0LoggerR0)
2599 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2600 if (pVCpu->vmm.s.pR0RelLoggerR0)
2601 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = false;
2602}
2603
2604
2605/**
2606 * Checks if log flushing is disabled or not.
2607 *
2608 * @param pVCpu The cross context virtual CPU structure.
2609 */
2610VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPUCC pVCpu)
2611{
2612 if (pVCpu->vmm.s.pR0LoggerR0)
2613 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2614 if (pVCpu->vmm.s.pR0RelLoggerR0)
2615 return pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled;
2616 return true;
2617}
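/* These helpers are meant to bracket stretches of ring-0 code during which
 * a logger flush (which would 'call' ring-3 via vmmR0LoggerFlush above)
 * must not happen. A sketched usage pattern, assuming nothing beyond the
 * functions defined here:
 *
 *     VMMR0LogFlushDisable(pVCpu);
 *     ... code that must not trigger a ring-3 logger flush ...
 *     VMMR0LogFlushEnable(pVCpu);
 */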
2618
2619#endif /* LOG_ENABLED */
2620
2621/*
2622 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
2623 */
2624DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
2625{
2626 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
2627 if (pGVCpu)
2628 {
2629 PVMCPUCC pVCpu = pGVCpu;
2630 if (RT_VALID_PTR(pVCpu))
2631 {
2632 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR0;
2633 if (RT_VALID_PTR(pVmmLogger))
2634 {
2635 if ( pVmmLogger->fCreated
2636 && pVmmLogger->pVM == pGVCpu->pGVM)
2637 {
2638 if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
2639 return NULL;
2640 uint16_t const fFlags = RT_LO_U16(fFlagsAndGroup);
2641 uint16_t const iGroup = RT_HI_U16(fFlagsAndGroup);
2642 if ( iGroup != UINT16_MAX
2643 && ( ( pVmmLogger->Logger.afGroups[iGroup < pVmmLogger->Logger.cGroups ? iGroup : 0]
2644 & (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED))
2645 != (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED)))
2646 return NULL;
2647 return &pVmmLogger->Logger;
2648 }
2649 }
2650 }
2651 }
2652 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
2653}
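/* The fFlagsAndGroup parameter packs the level flags into the low 16 bits
 * and the group index into the high 16 bits, mirroring the RT_LO_U16 /
 * RT_HI_U16 unpacking above. A worked sketch of the packing side
 * (RT_MAKE_U32 takes the low half first):
 *
 *     uint32_t fFlagsAndGroup = RT_MAKE_U32(RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP_VMM);
 *     PRTLOGGER pLogger = RTLogRelGetDefaultInstanceEx(fFlagsAndGroup);
 *     if (pLogger)                     // non-NULL: group and level are enabled
 *         RTLogLoggerEx(pLogger, RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP_VMM, "...");
 */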
2654
2655
2656/*
2657 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2658 *
2659 * @returns true if the breakpoint should be hit, false if it should be ignored.
2660 */
2661DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2662{
2663#if 0
2664 return true;
2665#else
2666 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2667 if (pVM)
2668 {
2669 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2670
2671 if (pVCpu)
2672 {
2673# ifdef RT_ARCH_X86
2674 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2675 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2676# else
2677 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2678 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2679# endif
2680 {
2681 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2682 return RT_FAILURE_NP(rc);
2683 }
2684 }
2685 }
2686# ifdef RT_OS_LINUX
2687 return true;
2688# else
2689 return false;
2690# endif
2691#endif
2692}
2693
2694
2695/*
2696 * Override this so we can push it up to ring-3.
2697 */
2698DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2699{
2700 /*
2701 * To the log.
2702 */
2703 LogAlways(("\n!!R0-Assertion Failed!!\n"
2704 "Expression: %s\n"
2705 "Location : %s(%d) %s\n",
2706 pszExpr, pszFile, uLine, pszFunction));
2707
2708 /*
2709 * To the global VMM buffer.
2710 */
2711 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2712 if (pVM)
2713 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2714 "\n!!R0-Assertion Failed!!\n"
2715 "Expression: %.*s\n"
2716 "Location : %s(%d) %s\n",
2717 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2718 pszFile, uLine, pszFunction);
2719
2720 /*
2721 * Continue the normal way.
2722 */
2723 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2724}
2725
2726
2727/**
2728 * Callback for RTLogFormatV which writes to the ring-3 log port.
2729 * See PFNLOGOUTPUT() for details.
2730 */
2731static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2732{
2733 for (size_t i = 0; i < cbChars; i++)
2734 {
2735 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2736 }
2737
2738 NOREF(pv);
2739 return cbChars;
2740}
2741
2742
2743/*
2744 * Override this so we can push it up to ring-3.
2745 */
2746DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2747{
2748 va_list vaCopy;
2749
2750 /*
2751 * Push the message to the loggers.
2752 */
2753 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2754 if (pLog)
2755 {
2756 va_copy(vaCopy, va);
2757 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2758 va_end(vaCopy);
2759 }
2760 pLog = RTLogRelGetDefaultInstance();
2761 if (pLog)
2762 {
2763 va_copy(vaCopy, va);
2764 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2765 va_end(vaCopy);
2766 }
2767
2768 /*
2769 * Push it to the global VMM buffer.
2770 */
2771 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2772 if (pVM)
2773 {
2774 va_copy(vaCopy, va);
2775 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2776 va_end(vaCopy);
2777 }
2778
2779 /*
2780 * Continue the normal way.
2781 */
2782 RTAssertMsg2V(pszFormat, va);
2783}
2784