VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 57211

Last change on this file since 57211 was 57109, checked in by vboxsync, 10 years ago

VMM: Check AC during ring-0 module and VM init when the host has SMAP enabled.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 76.0 KB
1/* $Id: VMMR0.cpp 57109 2015-07-28 11:50:17Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_VMM
22#include <VBox/vmm/vmm.h>
23#include <VBox/sup.h>
24#include <VBox/vmm/trpm.h>
25#include <VBox/vmm/cpum.h>
26#include <VBox/vmm/pdmapi.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/stam.h>
29#include <VBox/vmm/tm.h>
30#include "VMMInternal.h"
31#include <VBox/vmm/vm.h>
32#ifdef VBOX_WITH_PCI_PASSTHROUGH
33# include <VBox/vmm/pdmpci.h>
34#endif
35
36#include <VBox/vmm/gvmm.h>
37#include <VBox/vmm/gmm.h>
38#include <VBox/vmm/gim.h>
39#include <VBox/intnet.h>
40#include <VBox/vmm/hm.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <VBox/version.h>
44#include <VBox/log.h>
45
46#include <iprt/asm-amd64-x86.h>
47#include <iprt/assert.h>
48#include <iprt/crc.h>
49#include <iprt/mp.h>
50#include <iprt/once.h>
51#include <iprt/stdarg.h>
52#include <iprt/string.h>
53#include <iprt/thread.h>
54#include <iprt/timer.h>
55
56#include "dtrace/VBoxVMM.h"
57
58
59#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
60# pragma intrinsic(_AddressOfReturnAddress)
61#endif
62
63
64/*******************************************************************************
65* Defined Constants And Macros *
66*******************************************************************************/
67/** SMAP check setup. */
68#define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
69/** Checks that the AC flag is set if SMAP is enabled. If AC is not set, it
70 * will be logged and @a a_BadExpr is executed. */
71#define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
72 do { \
73 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
74 { \
75 RTCCUINTREG uEFlags = ASMGetFlags(); \
76 if (RT_LIKELY(uEFlags & X86_EFL_AC)) \
77 { /* likely */ } \
78 else \
79 { \
80 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)uEFlags); \
81 a_BadExpr; \
82 } \
83 } \
84 } while (0)
85/** Checks that the AC flag is set if SMAP is enabled. If AC is not set, it
86 * will be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
87 * executed. */
88#define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
89 VMM_CHECK_SMAP_CHECK( \
90 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
91 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)uEFlags); \
92 a_BadExpr)
93
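/*
 * Usage sketch for the SMAP checks above (illustrative only -- someR0EntryPoint
 * is a hypothetical name; see ModuleInit and vmmR0InitVM below for the real
 * call sites):
 *
 *      static int someR0EntryPoint(PVM pVM)
 *      {
 *          VMM_CHECK_SMAP_SETUP();                 // caches SUPR0GetKernelFeatures() once
 *          VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
 *
 *          int rc = VINF_SUCCESS;                  // ... the actual work goes here ...
 *          VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
 *          return rc;
 *      }
 */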
94
95/*******************************************************************************
96* Internal Functions *
97*******************************************************************************/
98RT_C_DECLS_BEGIN
99#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
100extern uint64_t __udivdi3(uint64_t, uint64_t);
101extern uint64_t __umoddi3(uint64_t, uint64_t);
102#endif
103RT_C_DECLS_END
104
105
106/*******************************************************************************
107* Global Variables *
108*******************************************************************************/
109/** Drag in necessary library bits.
110 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
111PFNRT g_VMMR0Deps[] =
112{
113 (PFNRT)RTCrc32,
114 (PFNRT)RTOnce,
115#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
116 (PFNRT)__udivdi3,
117 (PFNRT)__umoddi3,
118#endif
119 NULL
120};
121
122#ifdef RT_OS_SOLARIS
123/* Dependency information for the native Solaris loader. */
124extern "C" { char _depends_on[] = "vboxdrv"; }
125#endif
126
127
128
129/**
130 * Initialize the module.
131 * This is called when we're first loaded.
132 *
133 * @returns 0 on success.
134 * @returns VBox status on failure.
135 * @param hMod Image handle for use in APIs.
136 */
137DECLEXPORT(int) ModuleInit(void *hMod)
138{
139 VMM_CHECK_SMAP_SETUP();
140 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
141
142#ifdef VBOX_WITH_DTRACE_R0
143 /*
144 * The first thing to do is register the static tracepoints.
145 * (Deregistration is automatic.)
146 */
147 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
148 if (RT_FAILURE(rc2))
149 return rc2;
150#endif
151 LogFlow(("ModuleInit:\n"));
152
153#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
154 /*
155 * Display the CMOS debug code.
156 */
157 ASMOutU8(0x72, 0x03);
158 uint8_t bDebugCode = ASMInU8(0x73);
159 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
160 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
161#endif
162
163 /*
164 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
165 */
166 int rc = vmmInitFormatTypes();
167 if (RT_SUCCESS(rc))
168 {
169 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
170 rc = GVMMR0Init();
171 if (RT_SUCCESS(rc))
172 {
173 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
174 rc = GMMR0Init();
175 if (RT_SUCCESS(rc))
176 {
177 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
178 rc = HMR0Init();
179 if (RT_SUCCESS(rc))
180 {
181 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
182 rc = PGMRegisterStringFormatTypes();
183 if (RT_SUCCESS(rc))
184 {
185 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
186#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
187 rc = PGMR0DynMapInit();
188#endif
189 if (RT_SUCCESS(rc))
190 {
191 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
192 rc = IntNetR0Init();
193 if (RT_SUCCESS(rc))
194 {
195#ifdef VBOX_WITH_PCI_PASSTHROUGH
196 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
197 rc = PciRawR0Init();
198#endif
199 if (RT_SUCCESS(rc))
200 {
201 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
202 rc = CPUMR0ModuleInit();
203 if (RT_SUCCESS(rc))
204 {
205#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
206 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
207 rc = vmmR0TripleFaultHackInit();
208 if (RT_SUCCESS(rc))
209#endif
210 {
211 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
212 if (RT_SUCCESS(rc))
213 {
214 LogFlow(("ModuleInit: returns success.\n"));
215 return VINF_SUCCESS;
216 }
217 }
218
219 /*
220 * Bail out.
221 */
222#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
223 vmmR0TripleFaultHackTerm();
224#endif
225 }
226 else
227 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
228#ifdef VBOX_WITH_PCI_PASSTHROUGH
229 PciRawR0Term();
230#endif
231 }
232 else
233 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
234 IntNetR0Term();
235 }
236 else
237 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
238#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
239 PGMR0DynMapTerm();
240#endif
241 }
242 else
243 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
244 PGMDeregisterStringFormatTypes();
245 }
246 else
247 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
248 HMR0Term();
249 }
250 else
251 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
252 GMMR0Term();
253 }
254 else
255 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
256 GVMMR0Term();
257 }
258 else
259 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
260 vmmTermFormatTypes();
261 }
262 else
263 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
264
265 LogFlow(("ModuleInit: failed %Rrc\n", rc));
266 return rc;
267}
268
269
270/**
271 * Terminate the module.
272 * This is called when we're finally unloaded.
273 *
274 * @param hMod Image handle for use in APIs.
275 */
276DECLEXPORT(void) ModuleTerm(void *hMod)
277{
278 NOREF(hMod);
279 LogFlow(("ModuleTerm:\n"));
280
281 /*
282 * Terminate the CPUM module (Local APIC cleanup).
283 */
284 CPUMR0ModuleTerm();
285
286 /*
287 * Terminate the internal network service.
288 */
289 IntNetR0Term();
290
291 /*
292 * PGM (Darwin), HM and PciRaw global cleanup.
293 */
294#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
295 PGMR0DynMapTerm();
296#endif
297#ifdef VBOX_WITH_PCI_PASSTHROUGH
298 PciRawR0Term();
299#endif
300 PGMDeregisterStringFormatTypes();
301 HMR0Term();
302#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
303 vmmR0TripleFaultHackTerm();
304#endif
305
306 /*
307 * Destroy the GMM and GVMM instances.
308 */
309 GMMR0Term();
310 GVMMR0Term();
311
312 vmmTermFormatTypes();
313
314 LogFlow(("ModuleTerm: returns\n"));
315}
316
317
318/**
319 * Initiates the R0 driver for a particular VM instance.
320 *
321 * @returns VBox status code.
322 *
323 * @param pVM Pointer to the VM.
324 * @param uSvnRev The SVN revision of the ring-3 part.
325 * @param uBuildType Build type indicator.
326 * @thread EMT.
327 */
328static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
329{
330 VMM_CHECK_SMAP_SETUP();
331 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
332
333 /*
334 * Match the SVN revisions and build type.
335 */
336 if (uSvnRev != VMMGetSvnRev())
337 {
338 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
339 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
340 return VERR_VMM_R0_VERSION_MISMATCH;
341 }
342 if (uBuildType != vmmGetBuildType())
343 {
344 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
345 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
346 return VERR_VMM_R0_VERSION_MISMATCH;
347 }
348 if ( !VALID_PTR(pVM)
349 || pVM->pVMR0 != pVM)
350 return VERR_INVALID_PARAMETER;
351
352
353#ifdef LOG_ENABLED
354 /*
355 * Register the EMT R0 logger instance for VCPU 0.
356 */
357 PVMCPU pVCpu = &pVM->aCpus[0];
358
359 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
360 if (pR0Logger)
361 {
362# if 0 /* testing of the logger. */
363 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
364 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
365 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
366 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
367
368 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
369 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
370 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
371 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
372
373 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
374 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
375 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
376 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
377
378 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
379 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
380 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
381 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
382 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
383 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
384
385 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
386 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
387
388 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
389 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
390 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
391# endif
392 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
393 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
394 pR0Logger->fRegistered = true;
395 }
396#endif /* LOG_ENABLED */
397
398 /*
399 * Check if the host supports high resolution timers or not.
400 */
401 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
402 && !RTTimerCanDoHighResolution())
403 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
404
405 /*
406 * Initialize the per VM data for GVMM and GMM.
407 */
408 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
409 int rc = GVMMR0InitVM(pVM);
410// if (RT_SUCCESS(rc))
411// rc = GMMR0InitPerVMData(pVM);
412 if (RT_SUCCESS(rc))
413 {
414 /*
415 * Init HM, CPUM and PGM (Darwin only).
416 */
417 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
418 rc = HMR0InitVM(pVM);
419 if (RT_SUCCESS(rc))
420 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */
421 if (RT_SUCCESS(rc))
422 {
423 rc = CPUMR0InitVM(pVM);
424 if (RT_SUCCESS(rc))
425 {
426 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
427#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
428 rc = PGMR0DynMapInitVM(pVM);
429#endif
430 if (RT_SUCCESS(rc))
431 {
432 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
433#ifdef VBOX_WITH_PCI_PASSTHROUGH
434 rc = PciRawR0InitVM(pVM);
435#endif
436 if (RT_SUCCESS(rc))
437 {
438 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
439 rc = GIMR0InitVM(pVM);
440 if (RT_SUCCESS(rc))
441 {
442 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
443 if (RT_SUCCESS(rc))
444 {
445 GVMMR0DoneInitVM(pVM);
446 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
447 return rc;
448 }
449
450 /* bail out */
451 GIMR0TermVM(pVM);
452 }
453#ifdef VBOX_WITH_PCI_PASSTHROUGH
454 PciRawR0TermVM(pVM);
455#endif
456 }
457 }
458 }
459 HMR0TermVM(pVM);
460 }
461 }
462
463 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
464 return rc;
465}
466
467
468/**
469 * Terminates the R0 bits for a particular VM instance.
470 *
471 * This is normally called by ring-3 as part of the VM termination process, but
472 * may alternatively be called during the support driver session cleanup when
473 * the VM object is destroyed (see GVMM).
474 *
475 * @returns VBox status code.
476 *
477 * @param pVM Pointer to the VM.
478 * @param pGVM Pointer to the global VM structure. Optional.
479 * @thread EMT or session clean up thread.
480 */
481VMMR0_INT_DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
482{
483#ifdef VBOX_WITH_PCI_PASSTHROUGH
484 PciRawR0TermVM(pVM);
485#endif
486
487 /*
488 * Tell GVMM what we're up to and check that we only do this once.
489 */
490 if (GVMMR0DoingTermVM(pVM, pGVM))
491 {
492 GIMR0TermVM(pVM);
493
494 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
495 * here to make sure we don't leak any shared pages if we crash... */
496#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
497 PGMR0DynMapTermVM(pVM);
498#endif
499 HMR0TermVM(pVM);
500 }
501
502 /*
503 * Deregister the logger.
504 */
505 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
506 return VINF_SUCCESS;
507}
508
509
510/**
511 * VMM ring-0 thread-context callback.
512 *
513 * This does common HM state updating and calls the HM-specific thread-context
514 * callback.
515 *
516 * @param enmEvent The thread-context event.
517 * @param pvUser Opaque pointer to the VMCPU.
518 *
519 * @thread EMT(pvUser)
520 */
521static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
522{
523 PVMCPU pVCpu = (PVMCPU)pvUser;
524
525 switch (enmEvent)
526 {
527 case RTTHREADCTXEVENT_IN:
528 {
529 /*
530 * Linux may call us with preemption enabled (really!) but technically we
531 * cannot get preempted here, otherwise we end up in an infinite recursion
532 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
533 * ad infinitum). Let's just disable preemption for now...
534 */
535 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
536 * preemption after doing the callout (one or two functions up the
537 * call chain). */
538 /** @todo r=ramshankar: See @bugref{5313} comment #30. */
539 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
540 RTThreadPreemptDisable(&ParanoidPreemptState);
541
542 /* We need to update the VCPU <-> host CPU mapping. */
543 RTCPUID idHostCpu;
544 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
545 pVCpu->iHostCpuSet = iHostCpuSet;
546 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
547
548 /* In the very unlikely event that the GIP delta for the CPU we're
549 rescheduled to needs calculating, try to force a return to ring-3.
550 We unfortunately cannot do the measurements right here. */
551 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
552 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
553
554 /* Invoke the HM-specific thread-context callback. */
555 HMR0ThreadCtxCallback(enmEvent, pvUser);
556
557 /* Restore preemption. */
558 RTThreadPreemptRestore(&ParanoidPreemptState);
559 break;
560 }
561
562 case RTTHREADCTXEVENT_OUT:
563 {
564 /* Invoke the HM-specific thread-context callback. */
565 HMR0ThreadCtxCallback(enmEvent, pvUser);
566
567 /*
568 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
569 * have the same host CPU associated with it.
570 */
571 pVCpu->iHostCpuSet = UINT32_MAX;
572 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
573 break;
574 }
575
576 default:
577 /* Invoke the HM-specific thread-context callback. */
578 HMR0ThreadCtxCallback(enmEvent, pvUser);
579 break;
580 }
581}
582
583
584/**
585 * Creates thread switching hook for the current EMT thread.
586 *
587 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
588 * platform does not implement switcher hooks, no hooks will be created and the
589 * member is set to NIL_RTTHREADCTXHOOK.
590 *
591 * @returns VBox status code.
592 * @param pVCpu Pointer to the cross context CPU structure.
593 * @thread EMT(pVCpu)
594 */
595VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
596{
597 VMCPU_ASSERT_EMT(pVCpu);
598 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
599
600 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
601 if (RT_SUCCESS(rc))
602 return rc;
603
604 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
605 if (rc == VERR_NOT_SUPPORTED)
606 return VINF_SUCCESS;
607
608 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
609 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
610}
611
612
613/**
614 * Destroys the thread switching hook for the specified VCPU.
615 *
616 * @param pVCpu Pointer to the cross context CPU structure.
617 * @remarks Can be called from any thread.
618 */
619VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
620{
621 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
622 AssertRC(rc);
623}
624
625
626/**
627 * Disables the thread switching hook for this VCPU (if we got one).
628 *
629 * @param pVCpu Pointer to the cross context CPU structure.
630 * @thread EMT(pVCpu)
631 *
632 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
633 * this call. This means you have to be careful with what you do!
634 */
635VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
636{
637 /*
638 * Clear the VCPU <-> host CPU mapping as we've left HM context.
639 * @bugref{7726} comment #19 explains the need for this trick:
640 *
641 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
642 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disable context hooks during
643 * longjmp & normal return to ring-3, which opens a window where we may be
644 * rescheduled without changing VMCPUID::idHostCpu and cause confusion if
645 * the CPU starts executing a different EMT. Both functions first disable
646 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
647 * an opening for getting preempted.
648 */
649 /** @todo Make HM not need this API! Then we could leave the hooks enabled
650 * all the time. */
651 /** @todo move this into the context hook disabling if(). */
652 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
653
654 /*
655 * Disable the context hook, if we got one.
656 */
657 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
658 {
659 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
660 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
661 AssertRC(rc);
662 }
663}
664
665
666/**
667 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
668 *
669 * @returns true if registered, false otherwise.
670 * @param pVCpu Pointer to the VMCPU.
671 */
672DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
673{
674 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
675}
676
677
678/**
679 * Whether thread-context hooks are registered for this VCPU.
680 *
681 * @returns true if registered, false otherwise.
682 * @param pVCpu Pointer to the VMCPU.
683 */
684VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
685{
686 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
687}
688
689
690#ifdef VBOX_WITH_STATISTICS
691/**
692 * Record return code statistics
693 * @param pVM Pointer to the VM.
694 * @param pVCpu Pointer to the VMCPU.
695 * @param rc The status code.
696 */
697static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
698{
699 /*
700 * Collect statistics.
701 */
702 switch (rc)
703 {
704 case VINF_SUCCESS:
705 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
706 break;
707 case VINF_EM_RAW_INTERRUPT:
708 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
709 break;
710 case VINF_EM_RAW_INTERRUPT_HYPER:
711 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
712 break;
713 case VINF_EM_RAW_GUEST_TRAP:
714 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
715 break;
716 case VINF_EM_RAW_RING_SWITCH:
717 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
718 break;
719 case VINF_EM_RAW_RING_SWITCH_INT:
720 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
721 break;
722 case VINF_EM_RAW_STALE_SELECTOR:
723 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
724 break;
725 case VINF_EM_RAW_IRET_TRAP:
726 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
727 break;
728 case VINF_IOM_R3_IOPORT_READ:
729 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
730 break;
731 case VINF_IOM_R3_IOPORT_WRITE:
732 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
733 break;
734 case VINF_IOM_R3_MMIO_READ:
735 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
736 break;
737 case VINF_IOM_R3_MMIO_WRITE:
738 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
739 break;
740 case VINF_IOM_R3_MMIO_READ_WRITE:
741 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
742 break;
743 case VINF_PATM_HC_MMIO_PATCH_READ:
744 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
745 break;
746 case VINF_PATM_HC_MMIO_PATCH_WRITE:
747 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
748 break;
749 case VINF_CPUM_R3_MSR_READ:
750 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
751 break;
752 case VINF_CPUM_R3_MSR_WRITE:
753 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
754 break;
755 case VINF_EM_RAW_EMULATE_INSTR:
756 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
757 break;
758 case VINF_EM_RAW_EMULATE_IO_BLOCK:
759 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
760 break;
761 case VINF_PATCH_EMULATE_INSTR:
762 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
763 break;
764 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
765 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
766 break;
767 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
768 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
769 break;
770 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
771 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
772 break;
773 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
774 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
775 break;
776 case VINF_CSAM_PENDING_ACTION:
777 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
778 break;
779 case VINF_PGM_SYNC_CR3:
780 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
781 break;
782 case VINF_PATM_PATCH_INT3:
783 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
784 break;
785 case VINF_PATM_PATCH_TRAP_PF:
786 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
787 break;
788 case VINF_PATM_PATCH_TRAP_GP:
789 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
790 break;
791 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
792 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
793 break;
794 case VINF_EM_RESCHEDULE_REM:
795 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
796 break;
797 case VINF_EM_RAW_TO_R3:
798 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
799 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
800 else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
801 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
802 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
803 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
804 else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
805 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
806 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
807 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
808 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
809 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
810 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
811 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
812 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
813 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
814 else
815 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
816 break;
817
818 case VINF_EM_RAW_TIMER_PENDING:
819 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
820 break;
821 case VINF_EM_RAW_INTERRUPT_PENDING:
822 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
823 break;
824 case VINF_VMM_CALL_HOST:
825 switch (pVCpu->vmm.s.enmCallRing3Operation)
826 {
827 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
828 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
829 break;
830 case VMMCALLRING3_PDM_LOCK:
831 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
832 break;
833 case VMMCALLRING3_PGM_POOL_GROW:
834 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
835 break;
836 case VMMCALLRING3_PGM_LOCK:
837 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
838 break;
839 case VMMCALLRING3_PGM_MAP_CHUNK:
840 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
841 break;
842 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
843 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
844 break;
845 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
846 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
847 break;
848 case VMMCALLRING3_VMM_LOGGER_FLUSH:
849 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
850 break;
851 case VMMCALLRING3_VM_SET_ERROR:
852 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
853 break;
854 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
855 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
856 break;
857 case VMMCALLRING3_VM_R0_ASSERTION:
858 default:
859 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
860 break;
861 }
862 break;
863 case VINF_PATM_DUPLICATE_FUNCTION:
864 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
865 break;
866 case VINF_PGM_CHANGE_MODE:
867 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
868 break;
869 case VINF_PGM_POOL_FLUSH_PENDING:
870 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
871 break;
872 case VINF_EM_PENDING_REQUEST:
873 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
874 break;
875 case VINF_EM_HM_PATCH_TPR_INSTR:
876 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
877 break;
878 default:
879 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
880 break;
881 }
882}
883#endif /* VBOX_WITH_STATISTICS */
884
885
886/**
887 * Unused ring-0 entry point that used to be called from the interrupt gate.
888 *
889 * Will be removed one of the next times we do a major SUPDrv version bump.
890 *
891 * @returns VBox status code.
892 * @param pVM Pointer to the VM.
893 * @param enmOperation Which operation to execute.
894 * @param pvArg Argument to the operation.
895 * @remarks Assume called with interrupts disabled.
896 */
897VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
898{
899 /*
900 * We're returning VERR_NOT_SUPPORTED here so we've got something other
901 * than -1, which the interrupt gate glue code might return.
902 */
903 Log(("operation %#x is not supported\n", enmOperation));
904 NOREF(enmOperation); NOREF(pvArg); NOREF(pVM);
905 return VERR_NOT_SUPPORTED;
906}
907
908
909/**
910 * The Ring 0 entry point, called by the fast-ioctl path.
911 *
912 * @param pVM Pointer to the VM.
913 * The return code is stored in pVM->vmm.s.iLastGZRc.
914 * @param idCpu The Virtual CPU ID of the calling EMT.
915 * @param enmOperation Which operation to execute.
916 * @remarks Assume called with interrupts _enabled_.
917 */
918VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
919{
920 /*
921 * Validation.
922 */
923 if (RT_UNLIKELY(idCpu >= pVM->cCpus))
924 return;
925 PVMCPU pVCpu = &pVM->aCpus[idCpu];
926 if (RT_UNLIKELY(pVCpu->hNativeThreadR0 != RTThreadNativeSelf()))
927 return;
928
929 /*
930 * Perform requested operation.
931 */
932 switch (enmOperation)
933 {
934 /*
935 * Switch to GC and run guest raw mode code.
936 * Disable interrupts before doing the world switch.
937 */
938 case VMMR0_DO_RAW_RUN:
939 {
940#ifdef VBOX_WITH_RAW_MODE
941# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
942 /* Some safety precautions first. */
943 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
944 {
945 pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
946 break;
947 }
948# endif
949
950 /*
951 * Disable preemption.
952 */
953 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
954 RTThreadPreemptDisable(&PreemptState);
955
956 /*
957 * Get the host CPU identifiers, make sure they are valid and that
958 * we've got a TSC delta for the CPU.
959 */
960 RTCPUID idHostCpu;
961 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
962 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
963 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
964 {
965 /*
966 * Commit the CPU identifiers and update the periodic preemption timer if it's active.
967 */
968# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
969 CPUMR0SetLApic(pVCpu, iHostCpuSet);
970# endif
971 pVCpu->iHostCpuSet = iHostCpuSet;
972 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
973
974 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
975 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
976
977 /*
978 * We might need to disable VT-x if the active switcher turns off paging.
979 */
980 bool fVTxDisabled;
981 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
982 if (RT_SUCCESS(rc))
983 {
984 /*
985 * Disable interrupts and run raw-mode code. The loop is for efficiently
986 * dispatching tracepoints that fired in raw-mode context.
987 */
988 RTCCUINTREG uFlags = ASMIntDisableFlags();
989
990 for (;;)
991 {
992 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
993 TMNotifyStartOfExecution(pVCpu);
994
995 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
996 pVCpu->vmm.s.iLastGZRc = rc;
997
998 TMNotifyEndOfExecution(pVCpu);
999 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1000
1001 if (rc != VINF_VMM_CALL_TRACER)
1002 break;
1003 SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
1004 }
1005
1006 /*
1007 * Re-enable VT-x before we dispatch any pending host interrupts and
1008 * re-enable interrupts.
1009 */
1010 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1011
1012 if ( rc == VINF_EM_RAW_INTERRUPT
1013 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1014 TRPMR0DispatchHostInterrupt(pVM);
1015
1016 ASMSetFlags(uFlags);
1017
1018 /* Fire dtrace probe and collect statistics. */
1019 VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1020# ifdef VBOX_WITH_STATISTICS
1021 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1022 vmmR0RecordRC(pVM, pVCpu, rc);
1023# endif
1024 }
1025 else
1026 pVCpu->vmm.s.iLastGZRc = rc;
1027
1028 /*
1029 * Invalidate the host CPU identifiers as we restore preemption.
1030 */
1031 pVCpu->iHostCpuSet = UINT32_MAX;
1032 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1033
1034 RTThreadPreemptRestore(&PreemptState);
1035 }
1036 /*
1037 * Invalid CPU set index or TSC delta in need of measuring.
1038 */
1039 else
1040 {
1041 RTThreadPreemptRestore(&PreemptState);
1042 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1043 {
1044 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1045 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1046 0 /*default cTries*/);
1047 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1048 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1049 else
1050 pVCpu->vmm.s.iLastGZRc = rc;
1051 }
1052 else
1053 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1054 }
1055
1056#else /* !VBOX_WITH_RAW_MODE */
1057 pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_NOT_SUPPORTED;
1058#endif
1059 break;
1060 }
1061
1062 /*
1063 * Run guest code using the available hardware acceleration technology.
1064 */
1065 case VMMR0_DO_HM_RUN:
1066 {
1067 /*
1068 * Disable preemption.
1069 */
1070 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1071 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1072 RTThreadPreemptDisable(&PreemptState);
1073
1074 /*
1075 * Get the host CPU identifiers, make sure they are valid and that
1076 * we've got a TSC delta for the CPU.
1077 */
1078 RTCPUID idHostCpu;
1079 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1080 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1081 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1082 {
1083 pVCpu->iHostCpuSet = iHostCpuSet;
1084 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1085
1086 /*
1087 * Update the periodic preemption timer if it's active.
1088 */
1089 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1090 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1091
1092#ifdef LOG_ENABLED
1093 /*
1094 * Ugly: Lazy registration of ring 0 loggers.
1095 */
1096 if (pVCpu->idCpu > 0)
1097 {
1098 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
1099 if ( pR0Logger
1100 && RT_UNLIKELY(!pR0Logger->fRegistered))
1101 {
1102 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
1103 pR0Logger->fRegistered = true;
1104 }
1105 }
1106#endif
1107
1108 int rc;
1109 bool fPreemptRestored = false;
1110 if (!HMR0SuspendPending())
1111 {
1112 /*
1113 * Enable the context switching hook.
1114 */
1115 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1116 {
1117 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1118 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1119 }
1120
1121 /*
1122 * Enter HM context.
1123 */
1124 rc = HMR0Enter(pVM, pVCpu);
1125 if (RT_SUCCESS(rc))
1126 {
1127 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1128
1129 /*
1130 * When preemption hooks are in place, enable preemption now that
1131 * we're in HM context.
1132 */
1133 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1134 {
1135 fPreemptRestored = true;
1136 RTThreadPreemptRestore(&PreemptState);
1137 }
1138
1139 /*
1140 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1141 */
1142 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1143
1144 /*
1145 * Assert sanity on the way out. Using manual assertion code here as normal
1146 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1147 */
1148 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1149 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1150 {
1151 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1152 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1153 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1154 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1155 }
1156 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1157 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1158 {
1159 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1160 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1161 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1162 rc = VERR_INVALID_STATE;
1163 }
1164
1165 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1166 }
1167 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1168
1169 /*
1170 * Invalidate the host CPU identifiers before we disable the context
1171 * hook / restore preemption.
1172 */
1173 pVCpu->iHostCpuSet = UINT32_MAX;
1174 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1175
1176 /*
1177 * Disable context hooks. Due to unresolved cleanup issues, we
1178 * cannot leave the hooks enabled when we return to ring-3.
1179 *
1180 * Note! At the moment HM may also have disabled the hook
1181 * when we get here, but the IPRT API handles that.
1182 */
1183 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1184 {
1185 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1186 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1187 }
1188 }
1189 /*
1190 * The system is about to go into suspend mode; go back to ring 3.
1191 */
1192 else
1193 {
1194 rc = VINF_EM_RAW_INTERRUPT;
1195 pVCpu->iHostCpuSet = UINT32_MAX;
1196 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1197 }
1198
1199 /** @todo When HM stops messing with the context hook state, we'll disable
1200 * preemption again before the RTThreadCtxHookDisable call. */
1201 if (!fPreemptRestored)
1202 RTThreadPreemptRestore(&PreemptState);
1203
1204 pVCpu->vmm.s.iLastGZRc = rc;
1205
1206 /* Fire dtrace probe and collect statistics. */
1207 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1208#ifdef VBOX_WITH_STATISTICS
1209 vmmR0RecordRC(pVM, pVCpu, rc);
1210#endif
1211 }
1212 /*
1213 * Invalid CPU set index or TSC delta in need of measuring.
1214 */
1215 else
1216 {
1217 pVCpu->iHostCpuSet = UINT32_MAX;
1218 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1219 RTThreadPreemptRestore(&PreemptState);
1220 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1221 {
1222 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1223 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1224 0 /*default cTries*/);
1225 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1226 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1227 else
1228 pVCpu->vmm.s.iLastGZRc = rc;
1229 }
1230 else
1231 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1232 }
1233 break;
1234 }
1235
1236 /*
1237 * For profiling.
1238 */
1239 case VMMR0_DO_NOP:
1240 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1241 break;
1242
1243 /*
1244 * Impossible.
1245 */
1246 default:
1247 AssertMsgFailed(("%#x\n", enmOperation));
1248 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1249 break;
1250 }
1251}
1252
1253
1254/**
1255 * Validates a session or VM session argument.
1256 *
1257 * @returns true / false accordingly.
1258 * @param pVM Pointer to the VM.
1259 * @param pSession The session argument.
1260 */
1261DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1262{
1263 /* This must be set! */
1264 if (!pSession)
1265 return false;
1266
1267 /* Only one out of the two. */
1268 if (pVM && pClaimedSession)
1269 return false;
1270 if (pVM)
1271 pClaimedSession = pVM->pSession;
1272 return pClaimedSession == pSession;
1273}
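/*
 * Usage sketch (illustrative only; this mirrors the VMMR0_DO_INTNET_* and
 * VMMR0_DO_PCIRAW_REQ cases in vmmR0EntryExWorker below). The request packet
 * carries the session the caller claims to own, and it is checked against the
 * session the support driver established for the ioctl:
 *
 *      PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
 *      if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
 *          return VERR_INVALID_PARAMETER;
 *      return IntNetR0OpenReq(pSession, pReq);
 */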
1274
1275
1276/**
1277 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1278 * called through a longjmp so we can exit safely on failure.
1279 *
1280 * @returns VBox status code.
1281 * @param pVM Pointer to the VM.
1282 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1283 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
1284 * @param enmOperation Which operation to execute.
1285 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1286 * The support driver validates this if it's present.
1287 * @param u64Arg Some simple constant argument.
1288 * @param pSession The session of the caller.
1289 * @remarks Assume called with interrupts _enabled_.
1290 */
1291static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1292{
1293 /*
1294 * Common VM pointer validation.
1295 */
1296 if (pVM)
1297 {
1298 if (RT_UNLIKELY( !VALID_PTR(pVM)
1299 || ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
1300 {
1301 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
1302 return VERR_INVALID_POINTER;
1303 }
1304 if (RT_UNLIKELY( pVM->enmVMState < VMSTATE_CREATING
1305 || pVM->enmVMState > VMSTATE_TERMINATED
1306 || pVM->pVMR0 != pVM))
1307 {
1308 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
1309 pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
1310 return VERR_INVALID_POINTER;
1311 }
1312
1313 if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
1314 {
1315 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
1316 return VERR_INVALID_PARAMETER;
1317 }
1318 }
1319 else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
1320 {
1321 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1322 return VERR_INVALID_PARAMETER;
1323 }
1324
1325
1326 switch (enmOperation)
1327 {
1328 /*
1329 * GVM requests
1330 */
1331 case VMMR0_DO_GVMM_CREATE_VM:
1332 if (pVM || u64Arg || idCpu != NIL_VMCPUID)
1333 return VERR_INVALID_PARAMETER;
1334 return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);
1335
1336 case VMMR0_DO_GVMM_DESTROY_VM:
1337 if (pReqHdr || u64Arg)
1338 return VERR_INVALID_PARAMETER;
1339 return GVMMR0DestroyVM(pVM);
1340
1341 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1342 {
1343 if (!pVM)
1344 return VERR_INVALID_PARAMETER;
1345 return GVMMR0RegisterVCpu(pVM, idCpu);
1346 }
1347
1348 case VMMR0_DO_GVMM_SCHED_HALT:
1349 if (pReqHdr)
1350 return VERR_INVALID_PARAMETER;
1351 return GVMMR0SchedHalt(pVM, idCpu, u64Arg);
1352
1353 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1354 if (pReqHdr || u64Arg)
1355 return VERR_INVALID_PARAMETER;
1356 return GVMMR0SchedWakeUp(pVM, idCpu);
1357
1358 case VMMR0_DO_GVMM_SCHED_POKE:
1359 if (pReqHdr || u64Arg)
1360 return VERR_INVALID_PARAMETER;
1361 return GVMMR0SchedPoke(pVM, idCpu);
1362
1363 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1364 if (u64Arg)
1365 return VERR_INVALID_PARAMETER;
1366 return GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1367
1368 case VMMR0_DO_GVMM_SCHED_POLL:
1369 if (pReqHdr || u64Arg > 1)
1370 return VERR_INVALID_PARAMETER;
1371 return GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);
1372
1373 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1374 if (u64Arg)
1375 return VERR_INVALID_PARAMETER;
1376 return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);
1377
1378 case VMMR0_DO_GVMM_RESET_STATISTICS:
1379 if (u64Arg)
1380 return VERR_INVALID_PARAMETER;
1381 return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);
1382
1383 /*
1384 * Initialize the R0 part of a VM instance.
1385 */
1386 case VMMR0_DO_VMMR0_INIT:
1387 return vmmR0InitVM(pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1388
1389 /*
1390 * Terminate the R0 part of a VM instance.
1391 */
1392 case VMMR0_DO_VMMR0_TERM:
1393 return VMMR0TermVM(pVM, NULL);
1394
1395 /*
1396 * Attempt to enable HM mode and check the current setting.
1397 */
1398 case VMMR0_DO_HM_ENABLE:
1399 return HMR0EnableAllCpus(pVM);
1400
1401 /*
1402 * Setup the hardware accelerated session.
1403 */
1404 case VMMR0_DO_HM_SETUP_VM:
1405 return HMR0SetupVM(pVM);
1406
1407 /*
1408 * Switch to RC to execute Hypervisor function.
1409 */
1410 case VMMR0_DO_CALL_HYPERVISOR:
1411 {
1412#ifdef VBOX_WITH_RAW_MODE
1413 /*
1414 * Validate input / context.
1415 */
1416 if (RT_UNLIKELY(idCpu != 0))
1417 return VERR_INVALID_CPU_ID;
1418 if (RT_UNLIKELY(pVM->cCpus != 1))
1419 return VERR_INVALID_PARAMETER;
1420 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1421# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1422 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1423 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1424# endif
1425
1426 /*
1427 * Disable interrupts.
1428 */
1429 RTCCUINTREG fFlags = ASMIntDisableFlags();
1430
1431 /*
1432 * Get the host CPU identifiers, make sure they are valid and that
1433 * we've got a TSC delta for the CPU.
1434 */
1435 RTCPUID idHostCpu;
1436 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1437 if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
1438 {
1439 ASMSetFlags(fFlags);
1440 return VERR_INVALID_CPU_INDEX;
1441 }
1442 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1443 {
1444 ASMSetFlags(fFlags);
1445 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1446 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1447 0 /*default cTries*/);
1448 if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
1449 return rc;
1450 }
1451
1452 /*
1453 * Commit the CPU identifiers.
1454 */
1455# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1456 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1457# endif
1458 pVCpu->iHostCpuSet = iHostCpuSet;
1459 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1460
1461 /*
1462 * We might need to disable VT-x if the active switcher turns off paging.
1463 */
1464 bool fVTxDisabled;
1465 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1466 if (RT_SUCCESS(rc))
1467 {
1468 /*
1469 * Go through the wormhole...
1470 */
1471 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1472
1473 /*
1474 * Re-enable VT-x before we dispatch any pending host interrupts.
1475 */
1476 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1477
1478 if ( rc == VINF_EM_RAW_INTERRUPT
1479 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1480 TRPMR0DispatchHostInterrupt(pVM);
1481 }
1482
1483 /*
1484 * Invalidate the host CPU identifiers as we restore interrupts.
1485 */
1486 pVCpu->iHostCpuSet = UINT32_MAX;
1487 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1488 ASMSetFlags(fFlags);
1489 return rc;
1490
1491#else /* !VBOX_WITH_RAW_MODE */
1492 return VERR_RAW_MODE_NOT_SUPPORTED;
1493#endif
1494 }
1495
1496 /*
1497 * PGM wrappers.
1498 */
1499 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1500 if (idCpu == NIL_VMCPUID)
1501 return VERR_INVALID_CPU_ID;
1502 return PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);
1503
1504 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1505 if (idCpu == NIL_VMCPUID)
1506 return VERR_INVALID_CPU_ID;
1507 return PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu]);
1508
1509 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1510 if (idCpu == NIL_VMCPUID)
1511 return VERR_INVALID_CPU_ID;
1512 return PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);
1513
1514 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1515 if (idCpu != 0)
1516 return VERR_INVALID_CPU_ID;
1517 return PGMR0PhysSetupIommu(pVM);
1518
1519 /*
1520 * GMM wrappers.
1521 */
1522 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1523 if (u64Arg)
1524 return VERR_INVALID_PARAMETER;
1525 return GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1526
1527 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1528 if (u64Arg)
1529 return VERR_INVALID_PARAMETER;
1530 return GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1531
1532 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1533 if (u64Arg)
1534 return VERR_INVALID_PARAMETER;
1535 return GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1536
1537 case VMMR0_DO_GMM_FREE_PAGES:
1538 if (u64Arg)
1539 return VERR_INVALID_PARAMETER;
1540 return GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1541
1542 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1543 if (u64Arg)
1544 return VERR_INVALID_PARAMETER;
1545 return GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1546
1547 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1548 if (u64Arg)
1549 return VERR_INVALID_PARAMETER;
1550 return GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);
1551
1552 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1553 if (idCpu == NIL_VMCPUID)
1554 return VERR_INVALID_CPU_ID;
1555 if (u64Arg)
1556 return VERR_INVALID_PARAMETER;
1557 return GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1558
1559 case VMMR0_DO_GMM_BALLOONED_PAGES:
1560 if (u64Arg)
1561 return VERR_INVALID_PARAMETER;
1562 return GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1563
1564 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1565 if (u64Arg)
1566 return VERR_INVALID_PARAMETER;
1567 return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1568
1569 case VMMR0_DO_GMM_SEED_CHUNK:
1570 if (pReqHdr)
1571 return VERR_INVALID_PARAMETER;
1572 return GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);
1573
1574 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1575 if (idCpu == NIL_VMCPUID)
1576 return VERR_INVALID_CPU_ID;
1577 if (u64Arg)
1578 return VERR_INVALID_PARAMETER;
1579 return GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1580
1581 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1582 if (idCpu == NIL_VMCPUID)
1583 return VERR_INVALID_CPU_ID;
1584 if (u64Arg)
1585 return VERR_INVALID_PARAMETER;
1586 return GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1587
1588 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1589 if (idCpu == NIL_VMCPUID)
1590 return VERR_INVALID_CPU_ID;
1591 if ( u64Arg
1592 || pReqHdr)
1593 return VERR_INVALID_PARAMETER;
1594 return GMMR0ResetSharedModules(pVM, idCpu);
1595
1596#ifdef VBOX_WITH_PAGE_SHARING
1597 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1598 {
1599 if (idCpu == NIL_VMCPUID)
1600 return VERR_INVALID_CPU_ID;
1601 if ( u64Arg
1602 || pReqHdr)
1603 return VERR_INVALID_PARAMETER;
1604
1605 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1606 Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());
1607
1608# ifdef DEBUG_sandervl
1609 /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
1610 /* Todo: this can have bad side effects for unexpected jumps back to r3. */
1611 int rc = GMMR0CheckSharedModulesStart(pVM);
1612 if (rc == VINF_SUCCESS)
1613 {
1614 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
1615 Assert( rc == VINF_SUCCESS
1616 || (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
1617 GMMR0CheckSharedModulesEnd(pVM);
1618 }
1619# else
1620 int rc = GMMR0CheckSharedModules(pVM, pVCpu);
1621# endif
1622 return rc;
1623 }
1624#endif
1625
1626#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1627 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1628 if (u64Arg)
1629 return VERR_INVALID_PARAMETER;
1630 return GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1631#endif
1632
1633 case VMMR0_DO_GMM_QUERY_STATISTICS:
1634 if (u64Arg)
1635 return VERR_INVALID_PARAMETER;
1636 return GMMR0QueryStatisticsReq(pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1637
1638 case VMMR0_DO_GMM_RESET_STATISTICS:
1639 if (u64Arg)
1640 return VERR_INVALID_PARAMETER;
1641 return GMMR0ResetStatisticsReq(pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1642
1643 /*
1644 * A quick GCFGM mock-up.
1645 */
1646 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1647 case VMMR0_DO_GCFGM_SET_VALUE:
1648 case VMMR0_DO_GCFGM_QUERY_VALUE:
1649 {
1650 if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1651 return VERR_INVALID_PARAMETER;
1652 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1653 if (pReq->Hdr.cbReq != sizeof(*pReq))
1654 return VERR_INVALID_PARAMETER;
1655 int rc;
1656 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1657 {
1658 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1659 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1660 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1661 }
1662 else
1663 {
1664 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1665 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1666 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1667 }
1668 return rc;
1669 }
1670
1671 /*
1672 * PDM Wrappers.
1673 */
1674 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1675 {
1676 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1677 return VERR_INVALID_PARAMETER;
1678 return PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1679 }
1680
1681 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1682 {
1683 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1684 return VERR_INVALID_PARAMETER;
1685 return PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1686 }
1687
1688 /*
1689 * Requests to the internal networking service.
1690 */
1691 case VMMR0_DO_INTNET_OPEN:
1692 {
1693 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
1694 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
1695 return VERR_INVALID_PARAMETER;
1696 return IntNetR0OpenReq(pSession, pReq);
1697 }
1698
1699 case VMMR0_DO_INTNET_IF_CLOSE:
1700 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1701 return VERR_INVALID_PARAMETER;
1702 return IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
1703
1704 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
1705 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1706 return VERR_INVALID_PARAMETER;
1707 return IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
1708
1709 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
1710 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1711 return VERR_INVALID_PARAMETER;
1712 return IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
1713
1714 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
1715 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1716 return VERR_INVALID_PARAMETER;
1717 return IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
1718
1719 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
1720 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1721 return VERR_INVALID_PARAMETER;
1722 return IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
1723
1724 case VMMR0_DO_INTNET_IF_SEND:
1725 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1726 return VERR_INVALID_PARAMETER;
1727 return IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
1728
1729 case VMMR0_DO_INTNET_IF_WAIT:
1730 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1731 return VERR_INVALID_PARAMETER;
1732 return IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
1733
1734 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
1735 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1736 return VERR_INVALID_PARAMETER;
1737 return IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
1738
1739#ifdef VBOX_WITH_PCI_PASSTHROUGH
1740 /*
1741 * Requests to host PCI driver service.
1742 */
1743 case VMMR0_DO_PCIRAW_REQ:
1744 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1745 return VERR_INVALID_PARAMETER;
1746 return PciRawR0ProcessReq(pSession, pVM, (PPCIRAWSENDREQ)pReqHdr);
1747#endif
1748 /*
1749 * For profiling.
1750 */
1751 case VMMR0_DO_NOP:
1752 case VMMR0_DO_SLOW_NOP:
1753 return VINF_SUCCESS;
1754
1755 /*
1756 * For testing Ring-0 APIs invoked in this environment.
1757 */
1758 case VMMR0_DO_TESTS:
1759 /** @todo make new test */
1760 return VINF_SUCCESS;
1761
1762
1763#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1764 case VMMR0_DO_TEST_SWITCHER3264:
1765 if (idCpu == NIL_VMCPUID)
1766 return VERR_INVALID_CPU_ID;
1767 return HMR0TestSwitcher3264(pVM);
1768#endif
1769 default:
1770 /*
1771 * We're returning VERR_NOT_SUPPORTED here so we've got something other
1772 * than -1, which the interrupt gate glue code might return.
1773 */
1774 Log(("operation %#x is not supported\n", enmOperation));
1775 return VERR_NOT_SUPPORTED;
1776 }
1777}
1778
1779
1780/**
1781 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
1782 */
1783typedef struct VMMR0ENTRYEXARGS
1784{
1785 PVM pVM;
1786 VMCPUID idCpu;
1787 VMMR0OPERATION enmOperation;
1788 PSUPVMMR0REQHDR pReq;
1789 uint64_t u64Arg;
1790 PSUPDRVSESSION pSession;
1791} VMMR0ENTRYEXARGS;
1792/** Pointer to a vmmR0EntryExWrapper argument package. */
1793typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
1794
1795/**
1796 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
1797 *
1798 * @returns VBox status code.
1799 * @param pvArgs Pointer to the argument package (VMMR0ENTRYEXARGS).
1800 */
1801static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
1802{
1803 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
1804 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
1805 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
1806 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
1807 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
1808 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
1809}
1810
1811
1812/**
1813 * The Ring 0 entry point, called by the support library (SUP).
1814 *
1815 * @returns VBox status code.
1816 * @param pVM Pointer to the VM.
1817 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1818 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1819 * @param enmOperation Which operation to execute.
1820 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
1821 * @param u64Arg Some simple constant argument.
1822 * @param pSession The session of the caller.
1823 * @remarks Assume called with interrupts _enabled_.
1824 */
1825VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
1826{
1827 /*
1828 * Requests that should only happen on the EMT thread will be
1829 * wrapped in a setjmp so we can assert without causing trouble.
1830 */
1831 if ( VALID_PTR(pVM)
1832 && pVM->pVMR0
1833 && idCpu < pVM->cCpus)
1834 {
1835 switch (enmOperation)
1836 {
1837 /* These might/will be called before VMMR3Init. */
1838 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1839 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1840 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1841 case VMMR0_DO_GMM_FREE_PAGES:
1842 case VMMR0_DO_GMM_BALLOONED_PAGES:
1843 /* On the Mac we might not have a valid jmp buf, so check these as well. */
1844 case VMMR0_DO_VMMR0_INIT:
1845 case VMMR0_DO_VMMR0_TERM:
1846 {
1847 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1848
1849 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
1850 break;
1851
1852 /** @todo validate this EMT claim... GVM knows. */
1853 VMMR0ENTRYEXARGS Args;
1854 Args.pVM = pVM;
1855 Args.idCpu = idCpu;
1856 Args.enmOperation = enmOperation;
1857 Args.pReq = pReq;
1858 Args.u64Arg = u64Arg;
1859 Args.pSession = pSession;
1860 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
1861 }
1862
1863 default:
1864 break;
1865 }
1866 }
1867 return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
1868}
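
/*
 * A minimal ring-3 usage sketch (hedged): it only illustrates how the entry
 * point above is typically reached through the support library.  The helper
 * name below is hypothetical, and the SUPR3CallVMMR0Ex() signature is assumed
 * from VBox/sup.h rather than taken from this file.
 */
#if 0
static int vmmR3SketchPingRing0(PVM pVM)
{
    /* VMMR0_DO_NOP carries no request packet, so it is handy for measuring
       the bare ring-3 -> ring-0 -> ring-3 round trip. */
    return SUPR3CallVMMR0Ex(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_NOP, 0 /*u64Arg*/, NULL /*pReqHdr*/);
}
#endif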
1869
1870
1871/**
1872 * Checks whether we've armed the ring-0 long jump machinery.
1873 *
1874 * @returns @c true / @c false
1875 * @param pVCpu Pointer to the VMCPU.
1876 * @thread EMT
1877 * @sa VMMIsLongJumpArmed
1878 */
1879VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
1880{
1881#ifdef RT_ARCH_X86
1882 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
1883 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
1884#else
1885 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
1886 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
1887#endif
1888}
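
/*
 * A minimal usage sketch (hedged): the helper below is hypothetical and only
 * shows how ring-0 code would guard a ring-3 callback with the check above,
 * mirroring what vmmR0LoggerFlush() does further down in this file.
 */
#if 0
static int vmmR0SketchFlushToRing3(PVM pVM, PVMCPU pVCpu)
{
    if (!VMMR0IsLongJumpArmed(pVCpu))
        return VERR_VMM_LONG_JMP_ERROR; /* assumption: any suitable failure status would do */
    return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0 /*uArg*/);
}
#endif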
1889
1890
1891/**
1892 * Checks whether we've done a ring-3 long jump.
1893 *
1894 * @returns @c true / @c false
1895 * @param pVCpu Pointer to the VMCPU.
1896 * @thread EMT
1897 */
1898VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
1899{
1900 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
1901}
1902
1903
1904/**
1905 * Internal R0 logger worker: Flush logger.
1906 *
1907 * @param pLogger The logger instance to flush.
1908 * @remark This function must be exported!
1909 */
1910VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
1911{
1912#ifdef LOG_ENABLED
1913 /*
1914 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
1915 * (This code is a bit paranoid.)
1916 */
1917 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
1918 if ( !VALID_PTR(pR0Logger)
1919 || !VALID_PTR(pR0Logger + 1)
1920 || pLogger->u32Magic != RTLOGGER_MAGIC)
1921 {
1922# ifdef DEBUG
1923 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
1924# endif
1925 return;
1926 }
1927 if (pR0Logger->fFlushingDisabled)
1928 return; /* quietly */
1929
1930 PVM pVM = pR0Logger->pVM;
1931 if ( !VALID_PTR(pVM)
1932 || pVM->pVMR0 != pVM)
1933 {
1934# ifdef DEBUG
1935 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
1936# endif
1937 return;
1938 }
1939
1940 PVMCPU pVCpu = VMMGetCpu(pVM);
1941 if (pVCpu)
1942 {
1943 /*
1944 * Check that the jump buffer is armed.
1945 */
1946# ifdef RT_ARCH_X86
1947 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
1948 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
1949# else
1950 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
1951 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
1952# endif
1953 {
1954# ifdef DEBUG
1955 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
1956# endif
1957 return;
1958 }
1959 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
1960 }
1961# ifdef DEBUG
1962 else
1963 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
1964# endif
1965#endif
1966}
1967
1968/**
1969 * Internal R0 logger worker: Custom prefix.
1970 *
1971 * @returns Number of chars written.
1972 *
1973 * @param pLogger The logger instance.
1974 * @param pchBuf The output buffer.
1975 * @param cchBuf The size of the buffer.
1976 * @param pvUser User argument (ignored).
1977 */
1978VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
1979{
1980 NOREF(pvUser);
1981#ifdef LOG_ENABLED
1982 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
1983 if ( !VALID_PTR(pR0Logger)
1984 || !VALID_PTR(pR0Logger + 1)
1985 || pLogger->u32Magic != RTLOGGER_MAGIC
1986 || cchBuf < 2)
1987 return 0;
1988
1989 static const char s_szHex[17] = "0123456789abcdef";
1990 VMCPUID const idCpu = pR0Logger->idCpu;
1991 pchBuf[1] = s_szHex[ idCpu & 15];
1992 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
1993
1994 return 2;
1995#else
1996 return 0;
1997#endif
1998}
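
/*
 * Worked example for the prefix callback above (illustrative only): with
 * idCpu = 0x1a it writes s_szHex[1] = '1' into pchBuf[0] and s_szHex[10] = 'a'
 * into pchBuf[1], so every log line emitted by virtual CPU 26 starts with the
 * two characters "1a".
 */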
1999
2000#ifdef LOG_ENABLED
2001
2002/**
2003 * Disables flushing of the ring-0 debug log.
2004 *
2005 * @param pVCpu Pointer to the VMCPU.
2006 */
2007VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
2008{
2009 if (pVCpu->vmm.s.pR0LoggerR0)
2010 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2011}
2012
2013
2014/**
2015 * Enables flushing of the ring-0 debug log.
2016 *
2017 * @param pVCpu Pointer to the VMCPU.
2018 */
2019VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
2020{
2021 if (pVCpu->vmm.s.pR0LoggerR0)
2022 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2023}
2024
2025
2026/**
2027 * Checks whether log flushing is disabled.
2028 *
2029 * @param pVCpu Pointer to the VMCPU.
2030 */
2031VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
2032{
2033 if (pVCpu->vmm.s.pR0LoggerR0)
2034 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2035 return true;
2036}
2037#endif /* LOG_ENABLED */
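
/*
 * A minimal usage sketch (hedged): the disable/enable pair above is meant to
 * bracket ring-0 sections that may log but must not trigger a ring-3 logger
 * flush.  The helper below is hypothetical and not part of this module.
 */
#if 0
static void vmmR0SketchFlushSensitiveWork(PVMCPU pVCpu)
{
    VMMR0LogFlushDisable(pVCpu);
    Log(("Flush sensitive work on VCPU %u\n", pVCpu->idCpu));
    VMMR0LogFlushEnable(pVCpu);
}
#endif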
2038
2039/**
2040 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2041 *
2042 * @returns true if the breakpoint should be hit, false if it should be ignored.
2043 */
2044DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2045{
2046#if 0
2047 return true;
2048#else
2049 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2050 if (pVM)
2051 {
2052 PVMCPU pVCpu = VMMGetCpu(pVM);
2053
2054 if (pVCpu)
2055 {
2056#ifdef RT_ARCH_X86
2057 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2058 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2059#else
2060 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2061 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2062#endif
2063 {
2064 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2065 return RT_FAILURE_NP(rc);
2066 }
2067 }
2068 }
2069#ifdef RT_OS_LINUX
2070 return true;
2071#else
2072 return false;
2073#endif
2074#endif
2075}
2076
2077
2078/**
2079 * Override this so we can push it up to ring-3.
2080 *
2081 * @param pszExpr Expression. Can be NULL.
2082 * @param uLine Location line number.
2083 * @param pszFile Location file name.
2084 * @param pszFunction Location function name.
2085 */
2086DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2087{
2088 /*
2089 * To the log.
2090 */
2091 LogAlways(("\n!!R0-Assertion Failed!!\n"
2092 "Expression: %s\n"
2093 "Location : %s(%d) %s\n",
2094 pszExpr, pszFile, uLine, pszFunction));
2095
2096 /*
2097 * To the global VMM buffer.
2098 */
2099 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2100 if (pVM)
2101 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2102 "\n!!R0-Assertion Failed!!\n"
2103 "Expression: %s\n"
2104 "Location : %s(%d) %s\n",
2105 pszExpr, pszFile, uLine, pszFunction);
2106
2107 /*
2108 * Continue the normal way.
2109 */
2110 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2111}
2112
2113
2114/**
2115 * Callback for RTLogFormatV which writes to the ring-3 log port.
2116 * See PFNLOGOUTPUT() for details.
2117 */
2118static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2119{
2120 for (size_t i = 0; i < cbChars; i++)
2121 LogAlways(("%c", pachChars[i]));
2122
2123 NOREF(pv);
2124 return cbChars;
2125}
2126
2127
2128/**
2129 * Override this so we can push it up to ring-3.
2130 *
2131 * @param pszFormat The format string.
2132 * @param va Arguments.
2133 */
2134DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2135{
2136 va_list vaCopy;
2137
2138 /*
2139 * Push the message to the loggers.
2140 */
2141 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2142 if (pLog)
2143 {
2144 va_copy(vaCopy, va);
2145 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2146 va_end(vaCopy);
2147 }
2148 pLog = RTLogRelGetDefaultInstance();
2149 if (pLog)
2150 {
2151 va_copy(vaCopy, va);
2152 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2153 va_end(vaCopy);
2154 }
2155
2156 /*
2157 * Push it to the global VMM buffer.
2158 */
2159 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2160 if (pVM)
2161 {
2162 va_copy(vaCopy, va);
2163 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2164 va_end(vaCopy);
2165 }
2166
2167 /*
2168 * Continue the normal way.
2169 */
2170 RTAssertMsg2V(pszFormat, va);
2171}
2172