VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 57229

Last change on this file since 57229 was 57229, checked in by vboxsync, 10 years ago

SUPDrv,VMMR0: Added SUPR0BadContext for reporting AC=0 and (on darwin with VBOX_WITH_EFLAGS_AC_SET_IN_VBOXDRV defined) refuse further I/O control calls. That way we'll sit up and pay attention, hopefully. VBOX_WITH_EFLAGS_AC_SET_IN_VBOXDRV is currently enabled for all non-release builds (odd build numbers), and we check EFLAGS.AC at the end of each ioctrl call.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 76.4 KB
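
The EFLAGS.AC check mentioned in the commit message above is essentially the pattern implemented by the VMM_CHECK_SMAP_CHECK macro defined in this file. A minimal sketch of the idea, assuming a ring-0 entry point with a valid PSUPDRVSESSION pSession in scope (the surrounding ioctl plumbing is omitted):

    /* Sketch: when the kernel uses SMAP, verify EFLAGS.AC is still set and
       report a bad context through the support driver if it is not. */
    uint32_t const fKernelFeatures = SUPR0GetKernelFeatures();
    if (fKernelFeatures & SUPKERNELFEATURES_SMAP)
    {
        RTCCUINTREG fEfl = ASMGetFlags();
        if (!(fEfl & X86_EFL_AC))
            SUPR0BadContext(pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!");
    }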
1/* $Id: VMMR0.cpp 57229 2015-08-06 23:34:04Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_VMM
22#include <VBox/vmm/vmm.h>
23#include <VBox/sup.h>
24#include <VBox/vmm/trpm.h>
25#include <VBox/vmm/cpum.h>
26#include <VBox/vmm/pdmapi.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/stam.h>
29#include <VBox/vmm/tm.h>
30#include "VMMInternal.h"
31#include <VBox/vmm/vm.h>
32#ifdef VBOX_WITH_PCI_PASSTHROUGH
33# include <VBox/vmm/pdmpci.h>
34#endif
35
36#include <VBox/vmm/gvmm.h>
37#include <VBox/vmm/gmm.h>
38#include <VBox/vmm/gim.h>
39#include <VBox/intnet.h>
40#include <VBox/vmm/hm.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <VBox/version.h>
44#include <VBox/log.h>
45
46#include <iprt/asm-amd64-x86.h>
47#include <iprt/assert.h>
48#include <iprt/crc.h>
49#include <iprt/mp.h>
50#include <iprt/once.h>
51#include <iprt/stdarg.h>
52#include <iprt/string.h>
53#include <iprt/thread.h>
54#include <iprt/timer.h>
55
56#include "dtrace/VBoxVMM.h"
57
58
 59#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
60# pragma intrinsic(_AddressOfReturnAddress)
61#endif
62
63
64/*******************************************************************************
65* Defined Constants And Macros *
66*******************************************************************************/
67/** SMAP check setup. */
68#define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
69/** Checks that the AC flag is set if SMAP is enabled. If AC is not set, it
70 * will be logged and @a a_BadExpr is executed. */
71#define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
72 do { \
73 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
74 { \
75 RTCCUINTREG fEflCheck = ASMGetFlags(); \
76 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
77 { /* likely */ } \
78 else \
79 { \
80 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
81 a_BadExpr; \
82 } \
83 } \
84 } while (0)
85/** Checks that the AC flag is set if SMAP is enabled. If AC is not set, it
 86 * will be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
87 * executed. */
88#define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
89 do { \
90 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
91 { \
92 RTCCUINTREG fEflCheck = ASMGetFlags(); \
93 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
94 { /* likely */ } \
95 else \
96 { \
97 SUPR0BadContext((a_pVM)->pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
98 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
99 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
100 a_BadExpr; \
101 } \
102 } \
103 } while (0)
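/** Usage sketch for the two checks above, mirroring vmmR0InitVM further down:
 * declare the kernel feature mask once per function with VMM_CHECK_SMAP_SETUP(),
 * then check at interesting points, passing the expression to execute when
 * EFLAGS.AC is unexpectedly clear:
 *
 *     VMM_CHECK_SMAP_SETUP();
 *     VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
 *     ...
 *     VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
 */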
104
105
106/*******************************************************************************
107* Internal Functions *
108*******************************************************************************/
109RT_C_DECLS_BEGIN
110#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
111extern uint64_t __udivdi3(uint64_t, uint64_t);
112extern uint64_t __umoddi3(uint64_t, uint64_t);
113#endif
114RT_C_DECLS_END
115
116
117/*******************************************************************************
118* Global Variables *
119*******************************************************************************/
120/** Drag in necessary library bits.
121 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
122PFNRT g_VMMR0Deps[] =
123{
124 (PFNRT)RTCrc32,
125 (PFNRT)RTOnce,
126#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
127 (PFNRT)__udivdi3,
128 (PFNRT)__umoddi3,
129#endif
130 NULL
131};
132
133#ifdef RT_OS_SOLARIS
134/* Dependency information for the native solaris loader. */
135extern "C" { char _depends_on[] = "vboxdrv"; }
136#endif
137
138
139
140/**
141 * Initialize the module.
142 * This is called when we're first loaded.
143 *
144 * @returns 0 on success.
145 * @returns VBox status on failure.
146 * @param hMod Image handle for use in APIs.
147 */
148DECLEXPORT(int) ModuleInit(void *hMod)
149{
150 VMM_CHECK_SMAP_SETUP();
151 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
152
153#ifdef VBOX_WITH_DTRACE_R0
154 /*
155 * The first thing to do is register the static tracepoints.
156 * (Deregistration is automatic.)
157 */
158 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
159 if (RT_FAILURE(rc2))
160 return rc2;
161#endif
162 LogFlow(("ModuleInit:\n"));
163
164#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
165 /*
166 * Display the CMOS debug code.
167 */
168 ASMOutU8(0x72, 0x03);
169 uint8_t bDebugCode = ASMInU8(0x73);
170 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
171 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
172#endif
173
174 /*
175 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
176 */
177 int rc = vmmInitFormatTypes();
178 if (RT_SUCCESS(rc))
179 {
180 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
181 rc = GVMMR0Init();
182 if (RT_SUCCESS(rc))
183 {
184 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
185 rc = GMMR0Init();
186 if (RT_SUCCESS(rc))
187 {
188 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
189 rc = HMR0Init();
190 if (RT_SUCCESS(rc))
191 {
192 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
193 rc = PGMRegisterStringFormatTypes();
194 if (RT_SUCCESS(rc))
195 {
196 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
197#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
198 rc = PGMR0DynMapInit();
199#endif
200 if (RT_SUCCESS(rc))
201 {
202 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
203 rc = IntNetR0Init();
204 if (RT_SUCCESS(rc))
205 {
206#ifdef VBOX_WITH_PCI_PASSTHROUGH
207 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
208 rc = PciRawR0Init();
209#endif
210 if (RT_SUCCESS(rc))
211 {
212 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
213 rc = CPUMR0ModuleInit();
214 if (RT_SUCCESS(rc))
215 {
216#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
217 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
218 rc = vmmR0TripleFaultHackInit();
219 if (RT_SUCCESS(rc))
220#endif
221 {
222 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
223 if (RT_SUCCESS(rc))
224 {
225 LogFlow(("ModuleInit: returns success.\n"));
226 return VINF_SUCCESS;
227 }
228 }
229
230 /*
231 * Bail out.
232 */
233#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
234 vmmR0TripleFaultHackTerm();
235#endif
236 }
237 else
238 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
239#ifdef VBOX_WITH_PCI_PASSTHROUGH
240 PciRawR0Term();
241#endif
242 }
243 else
244 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
245 IntNetR0Term();
246 }
247 else
248 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
249#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
250 PGMR0DynMapTerm();
251#endif
252 }
253 else
254 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
255 PGMDeregisterStringFormatTypes();
256 }
257 else
258 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
259 HMR0Term();
260 }
261 else
262 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
263 GMMR0Term();
264 }
265 else
266 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
267 GVMMR0Term();
268 }
269 else
270 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
271 vmmTermFormatTypes();
272 }
273 else
274 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
275
276 LogFlow(("ModuleInit: failed %Rrc\n", rc));
277 return rc;
278}
279
280
281/**
282 * Terminate the module.
283 * This is called when we're finally unloaded.
284 *
285 * @param hMod Image handle for use in APIs.
286 */
287DECLEXPORT(void) ModuleTerm(void *hMod)
288{
289 NOREF(hMod);
290 LogFlow(("ModuleTerm:\n"));
291
292 /*
293 * Terminate the CPUM module (Local APIC cleanup).
294 */
295 CPUMR0ModuleTerm();
296
297 /*
298 * Terminate the internal network service.
299 */
300 IntNetR0Term();
301
302 /*
303 * PGM (Darwin), HM and PciRaw global cleanup.
304 */
305#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
306 PGMR0DynMapTerm();
307#endif
308#ifdef VBOX_WITH_PCI_PASSTHROUGH
309 PciRawR0Term();
310#endif
311 PGMDeregisterStringFormatTypes();
312 HMR0Term();
313#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
314 vmmR0TripleFaultHackTerm();
315#endif
316
317 /*
318 * Destroy the GMM and GVMM instances.
319 */
320 GMMR0Term();
321 GVMMR0Term();
322
323 vmmTermFormatTypes();
324
325 LogFlow(("ModuleTerm: returns\n"));
326}
327
328
329/**
330 * Initiates the R0 driver for a particular VM instance.
331 *
332 * @returns VBox status code.
333 *
334 * @param pVM Pointer to the VM.
335 * @param uSvnRev The SVN revision of the ring-3 part.
336 * @param uBuildType Build type indicator.
337 * @thread EMT.
338 */
339static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
340{
341 VMM_CHECK_SMAP_SETUP();
342 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
343
344 /*
345 * Match the SVN revisions and build type.
346 */
347 if (uSvnRev != VMMGetSvnRev())
348 {
349 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
350 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
351 return VERR_VMM_R0_VERSION_MISMATCH;
352 }
353 if (uBuildType != vmmGetBuildType())
354 {
355 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
356 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
357 return VERR_VMM_R0_VERSION_MISMATCH;
358 }
359 if ( !VALID_PTR(pVM)
360 || pVM->pVMR0 != pVM)
361 return VERR_INVALID_PARAMETER;
362
363
364#ifdef LOG_ENABLED
365 /*
366 * Register the EMT R0 logger instance for VCPU 0.
367 */
368 PVMCPU pVCpu = &pVM->aCpus[0];
369
370 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
371 if (pR0Logger)
372 {
373# if 0 /* testing of the logger. */
374 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
375 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
376 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
377 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
378
379 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
380 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
381 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
382 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
383
384 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
385 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
386 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
387 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
388
389 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
390 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
391 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
392 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
393 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
394 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
395
396 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
397 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
398
399 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
400 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
401 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
402# endif
403 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
404 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
405 pR0Logger->fRegistered = true;
406 }
407#endif /* LOG_ENABLED */
408
409 /*
410 * Check if the host supports high resolution timers or not.
411 */
412 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
413 && !RTTimerCanDoHighResolution())
414 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
415
416 /*
417 * Initialize the per VM data for GVMM and GMM.
418 */
419 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
420 int rc = GVMMR0InitVM(pVM);
421// if (RT_SUCCESS(rc))
422// rc = GMMR0InitPerVMData(pVM);
423 if (RT_SUCCESS(rc))
424 {
425 /*
426 * Init HM, CPUM and PGM (Darwin only).
427 */
428 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
429 rc = HMR0InitVM(pVM);
430 if (RT_SUCCESS(rc))
 431 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
432 if (RT_SUCCESS(rc))
433 {
434 rc = CPUMR0InitVM(pVM);
435 if (RT_SUCCESS(rc))
436 {
437 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
438#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
439 rc = PGMR0DynMapInitVM(pVM);
440#endif
441 if (RT_SUCCESS(rc))
442 {
443 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
444#ifdef VBOX_WITH_PCI_PASSTHROUGH
445 rc = PciRawR0InitVM(pVM);
446#endif
447 if (RT_SUCCESS(rc))
448 {
449 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
450 rc = GIMR0InitVM(pVM);
451 if (RT_SUCCESS(rc))
452 {
453 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
454 if (RT_SUCCESS(rc))
455 {
456 GVMMR0DoneInitVM(pVM);
457 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
458 return rc;
459 }
460
 461 /* bail out */
462 GIMR0TermVM(pVM);
463 }
464#ifdef VBOX_WITH_PCI_PASSTHROUGH
465 PciRawR0TermVM(pVM);
466#endif
467 }
468 }
469 }
470 HMR0TermVM(pVM);
471 }
472 }
473
474 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
475 return rc;
476}
477
478
479/**
480 * Terminates the R0 bits for a particular VM instance.
481 *
482 * This is normally called by ring-3 as part of the VM termination process, but
483 * may alternatively be called during the support driver session cleanup when
484 * the VM object is destroyed (see GVMM).
485 *
486 * @returns VBox status code.
487 *
488 * @param pVM Pointer to the VM.
489 * @param pGVM Pointer to the global VM structure. Optional.
490 * @thread EMT or session clean up thread.
491 */
492VMMR0_INT_DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
493{
494#ifdef VBOX_WITH_PCI_PASSTHROUGH
495 PciRawR0TermVM(pVM);
496#endif
497
498 /*
499 * Tell GVMM what we're up to and check that we only do this once.
500 */
501 if (GVMMR0DoingTermVM(pVM, pGVM))
502 {
503 GIMR0TermVM(pVM);
504
505 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
506 * here to make sure we don't leak any shared pages if we crash... */
507#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
508 PGMR0DynMapTermVM(pVM);
509#endif
510 HMR0TermVM(pVM);
511 }
512
513 /*
514 * Deregister the logger.
515 */
516 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
517 return VINF_SUCCESS;
518}
519
520
521/**
522 * VMM ring-0 thread-context callback.
523 *
524 * This does common HM state updating and calls the HM-specific thread-context
525 * callback.
526 *
527 * @param enmEvent The thread-context event.
528 * @param pvUser Opaque pointer to the VMCPU.
529 *
530 * @thread EMT(pvUser)
531 */
532static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
533{
534 PVMCPU pVCpu = (PVMCPU)pvUser;
535
536 switch (enmEvent)
537 {
538 case RTTHREADCTXEVENT_IN:
539 {
540 /*
541 * Linux may call us with preemption enabled (really!) but technically we
542 * cannot get preempted here, otherwise we end up in an infinite recursion
543 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
544 * ad infinitum). Let's just disable preemption for now...
545 */
546 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
547 * preemption after doing the callout (one or two functions up the
548 * call chain). */
549 /** @todo r=ramshankar: See @bugref{5313} comment #30. */
550 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
551 RTThreadPreemptDisable(&ParanoidPreemptState);
552
553 /* We need to update the VCPU <-> host CPU mapping. */
554 RTCPUID idHostCpu;
555 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
556 pVCpu->iHostCpuSet = iHostCpuSet;
557 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
558
 559 /* In the very unlikely event that the GIP delta for the CPU we're being
 560 rescheduled onto needs calculating, try to force a return to ring-3.
 561 We unfortunately cannot do the measurements right here. */
562 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
563 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
564
565 /* Invoke the HM-specific thread-context callback. */
566 HMR0ThreadCtxCallback(enmEvent, pvUser);
567
568 /* Restore preemption. */
569 RTThreadPreemptRestore(&ParanoidPreemptState);
570 break;
571 }
572
573 case RTTHREADCTXEVENT_OUT:
574 {
575 /* Invoke the HM-specific thread-context callback. */
576 HMR0ThreadCtxCallback(enmEvent, pvUser);
577
578 /*
579 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
 580 * have the same host CPU associated with them.
581 */
582 pVCpu->iHostCpuSet = UINT32_MAX;
583 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
584 break;
585 }
586
587 default:
588 /* Invoke the HM-specific thread-context callback. */
589 HMR0ThreadCtxCallback(enmEvent, pvUser);
590 break;
591 }
592}
593
594
595/**
596 * Creates thread switching hook for the current EMT thread.
597 *
598 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
 599 * platform does not implement switcher hooks, no hooks will be created and the
600 * member set to NIL_RTTHREADCTXHOOK.
601 *
602 * @returns VBox status code.
603 * @param pVCpu Pointer to the cross context CPU structure.
604 * @thread EMT(pVCpu)
605 */
606VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
607{
608 VMCPU_ASSERT_EMT(pVCpu);
609 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
610
611 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
612 if (RT_SUCCESS(rc))
613 return rc;
614
615 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
616 if (rc == VERR_NOT_SUPPORTED)
617 return VINF_SUCCESS;
618
619 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
620 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
621}
622
623
624/**
625 * Destroys the thread switching hook for the specified VCPU.
626 *
627 * @param pVCpu Pointer to the cross context CPU structure.
628 * @remarks Can be called from any thread.
629 */
630VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
631{
632 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
633 AssertRC(rc);
634}
635
636
637/**
638 * Disables the thread switching hook for this VCPU (if we got one).
639 *
640 * @param pVCpu Pointer to the cross context CPU structure.
641 * @thread EMT(pVCpu)
642 *
643 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
644 * this call. This means you have to be careful with what you do!
645 */
646VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
647{
648 /*
649 * Clear the VCPU <-> host CPU mapping as we've left HM context.
650 * @bugref{7726} comment #19 explains the need for this trick:
651 *
652 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
 653 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disable context hooks during
 654 * longjmp & normal return to ring-3, which opens a window where we may be
 655 * rescheduled without changing VMCPU::idHostCpu, causing confusion if
 656 * the CPU starts executing a different EMT. Both functions first disable
 657 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
 658 * an opening for getting preempted.
659 */
660 /** @todo Make HM not need this API! Then we could leave the hooks enabled
661 * all the time. */
662 /** @todo move this into the context hook disabling if(). */
663 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
664
665 /*
666 * Disable the context hook, if we got one.
667 */
668 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
669 {
670 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
671 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
672 AssertRC(rc);
673 }
674}
675
676
677/**
 678 * Internal version of VMMR0ThreadCtxHookIsEnabled.
679 *
680 * @returns true if registered, false otherwise.
681 * @param pVCpu Pointer to the VMCPU.
682 */
683DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
684{
685 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
686}
687
688
689/**
690 * Whether thread-context hooks are registered for this VCPU.
691 *
692 * @returns true if registered, false otherwise.
693 * @param pVCpu Pointer to the VMCPU.
694 */
695VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
696{
697 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
698}
699
700
701#ifdef VBOX_WITH_STATISTICS
702/**
703 * Record return code statistics
704 * @param pVM Pointer to the VM.
705 * @param pVCpu Pointer to the VMCPU.
706 * @param rc The status code.
707 */
708static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
709{
710 /*
711 * Collect statistics.
712 */
713 switch (rc)
714 {
715 case VINF_SUCCESS:
716 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
717 break;
718 case VINF_EM_RAW_INTERRUPT:
719 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
720 break;
721 case VINF_EM_RAW_INTERRUPT_HYPER:
722 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
723 break;
724 case VINF_EM_RAW_GUEST_TRAP:
725 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
726 break;
727 case VINF_EM_RAW_RING_SWITCH:
728 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
729 break;
730 case VINF_EM_RAW_RING_SWITCH_INT:
731 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
732 break;
733 case VINF_EM_RAW_STALE_SELECTOR:
734 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
735 break;
736 case VINF_EM_RAW_IRET_TRAP:
737 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
738 break;
739 case VINF_IOM_R3_IOPORT_READ:
740 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
741 break;
742 case VINF_IOM_R3_IOPORT_WRITE:
743 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
744 break;
745 case VINF_IOM_R3_MMIO_READ:
746 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
747 break;
748 case VINF_IOM_R3_MMIO_WRITE:
749 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
750 break;
751 case VINF_IOM_R3_MMIO_READ_WRITE:
752 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
753 break;
754 case VINF_PATM_HC_MMIO_PATCH_READ:
755 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
756 break;
757 case VINF_PATM_HC_MMIO_PATCH_WRITE:
758 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
759 break;
760 case VINF_CPUM_R3_MSR_READ:
761 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
762 break;
763 case VINF_CPUM_R3_MSR_WRITE:
764 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
765 break;
766 case VINF_EM_RAW_EMULATE_INSTR:
767 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
768 break;
769 case VINF_EM_RAW_EMULATE_IO_BLOCK:
770 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
771 break;
772 case VINF_PATCH_EMULATE_INSTR:
773 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
774 break;
775 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
776 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
777 break;
778 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
779 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
780 break;
781 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
782 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
783 break;
784 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
785 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
786 break;
787 case VINF_CSAM_PENDING_ACTION:
788 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
789 break;
790 case VINF_PGM_SYNC_CR3:
791 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
792 break;
793 case VINF_PATM_PATCH_INT3:
794 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
795 break;
796 case VINF_PATM_PATCH_TRAP_PF:
797 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
798 break;
799 case VINF_PATM_PATCH_TRAP_GP:
800 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
801 break;
802 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
803 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
804 break;
805 case VINF_EM_RESCHEDULE_REM:
806 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
807 break;
808 case VINF_EM_RAW_TO_R3:
809 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
810 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
811 else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
812 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
813 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
814 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
815 else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
816 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
817 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
818 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
819 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
820 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
821 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
822 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
823 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
824 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
825 else
826 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
827 break;
828
829 case VINF_EM_RAW_TIMER_PENDING:
830 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
831 break;
832 case VINF_EM_RAW_INTERRUPT_PENDING:
833 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
834 break;
835 case VINF_VMM_CALL_HOST:
836 switch (pVCpu->vmm.s.enmCallRing3Operation)
837 {
838 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
839 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
840 break;
841 case VMMCALLRING3_PDM_LOCK:
842 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
843 break;
844 case VMMCALLRING3_PGM_POOL_GROW:
845 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
846 break;
847 case VMMCALLRING3_PGM_LOCK:
848 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
849 break;
850 case VMMCALLRING3_PGM_MAP_CHUNK:
851 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
852 break;
853 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
854 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
855 break;
856 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
857 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
858 break;
859 case VMMCALLRING3_VMM_LOGGER_FLUSH:
860 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
861 break;
862 case VMMCALLRING3_VM_SET_ERROR:
863 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
864 break;
865 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
866 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
867 break;
868 case VMMCALLRING3_VM_R0_ASSERTION:
869 default:
870 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
871 break;
872 }
873 break;
874 case VINF_PATM_DUPLICATE_FUNCTION:
875 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
876 break;
877 case VINF_PGM_CHANGE_MODE:
878 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
879 break;
880 case VINF_PGM_POOL_FLUSH_PENDING:
881 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
882 break;
883 case VINF_EM_PENDING_REQUEST:
884 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
885 break;
886 case VINF_EM_HM_PATCH_TPR_INSTR:
887 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
888 break;
889 default:
890 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
891 break;
892 }
893}
894#endif /* VBOX_WITH_STATISTICS */
895
896
897/**
898 * Unused ring-0 entry point that used to be called from the interrupt gate.
899 *
900 * Will be removed one of the next times we do a major SUPDrv version bump.
901 *
902 * @returns VBox status code.
903 * @param pVM Pointer to the VM.
904 * @param enmOperation Which operation to execute.
905 * @param pvArg Argument to the operation.
906 * @remarks Assume called with interrupts disabled.
907 */
908VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
909{
910 /*
 911 * We're returning VERR_NOT_SUPPORTED here so we've got something other
 912 * than -1, which the interrupt gate glue code might return.
913 */
914 Log(("operation %#x is not supported\n", enmOperation));
915 NOREF(enmOperation); NOREF(pvArg); NOREF(pVM);
916 return VERR_NOT_SUPPORTED;
917}
918
919
920/**
921 * The Ring 0 entry point, called by the fast-ioctl path.
922 *
923 * @param pVM Pointer to the VM.
924 * The return code is stored in pVM->vmm.s.iLastGZRc.
925 * @param idCpu The Virtual CPU ID of the calling EMT.
926 * @param enmOperation Which operation to execute.
927 * @remarks Assume called with interrupts _enabled_.
928 */
929VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
930{
931 /*
932 * Validation.
933 */
934 if (RT_UNLIKELY(idCpu >= pVM->cCpus))
935 return;
936 PVMCPU pVCpu = &pVM->aCpus[idCpu];
937 if (RT_UNLIKELY(pVCpu->hNativeThreadR0 != RTThreadNativeSelf()))
938 return;
939
940 /*
941 * Perform requested operation.
942 */
943 switch (enmOperation)
944 {
945 /*
946 * Switch to GC and run guest raw mode code.
947 * Disable interrupts before doing the world switch.
948 */
949 case VMMR0_DO_RAW_RUN:
950 {
951#ifdef VBOX_WITH_RAW_MODE
952# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
953 /* Some safety precautions first. */
954 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
955 {
956 pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
957 break;
958 }
959# endif
960
961 /*
962 * Disable preemption.
963 */
964 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
965 RTThreadPreemptDisable(&PreemptState);
966
967 /*
968 * Get the host CPU identifiers, make sure they are valid and that
969 * we've got a TSC delta for the CPU.
970 */
971 RTCPUID idHostCpu;
972 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
973 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
974 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
975 {
976 /*
 977 * Commit the CPU identifiers and update the periodic preemption timer if it's active.
978 */
979# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
980 CPUMR0SetLApic(pVCpu, iHostCpuSet);
981# endif
982 pVCpu->iHostCpuSet = iHostCpuSet;
983 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
984
985 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
986 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
987
988 /*
989 * We might need to disable VT-x if the active switcher turns off paging.
990 */
991 bool fVTxDisabled;
992 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
993 if (RT_SUCCESS(rc))
994 {
995 /*
996 * Disable interrupts and run raw-mode code. The loop is for efficiently
997 * dispatching tracepoints that fired in raw-mode context.
998 */
999 RTCCUINTREG uFlags = ASMIntDisableFlags();
1000
1001 for (;;)
1002 {
1003 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
1004 TMNotifyStartOfExecution(pVCpu);
1005
1006 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1007 pVCpu->vmm.s.iLastGZRc = rc;
1008
1009 TMNotifyEndOfExecution(pVCpu);
1010 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1011
1012 if (rc != VINF_VMM_CALL_TRACER)
1013 break;
1014 SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
1015 }
1016
1017 /*
1018 * Re-enable VT-x before we dispatch any pending host interrupts and
 1019 * re-enable interrupts.
1020 */
1021 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1022
1023 if ( rc == VINF_EM_RAW_INTERRUPT
1024 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1025 TRPMR0DispatchHostInterrupt(pVM);
1026
1027 ASMSetFlags(uFlags);
1028
1029 /* Fire dtrace probe and collect statistics. */
1030 VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1031# ifdef VBOX_WITH_STATISTICS
1032 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1033 vmmR0RecordRC(pVM, pVCpu, rc);
1034# endif
1035 }
1036 else
1037 pVCpu->vmm.s.iLastGZRc = rc;
1038
1039 /*
1040 * Invalidate the host CPU identifiers as we restore preemption.
1041 */
1042 pVCpu->iHostCpuSet = UINT32_MAX;
1043 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1044
1045 RTThreadPreemptRestore(&PreemptState);
1046 }
1047 /*
1048 * Invalid CPU set index or TSC delta in need of measuring.
1049 */
1050 else
1051 {
1052 RTThreadPreemptRestore(&PreemptState);
1053 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1054 {
1055 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1056 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1057 0 /*default cTries*/);
1058 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1059 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1060 else
1061 pVCpu->vmm.s.iLastGZRc = rc;
1062 }
1063 else
1064 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1065 }
1066
1067#else /* !VBOX_WITH_RAW_MODE */
1068 pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_NOT_SUPPORTED;
1069#endif
1070 break;
1071 }
1072
1073 /*
1074 * Run guest code using the available hardware acceleration technology.
1075 */
1076 case VMMR0_DO_HM_RUN:
1077 {
1078 /*
1079 * Disable preemption.
1080 */
1081 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1082 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1083 RTThreadPreemptDisable(&PreemptState);
1084
1085 /*
1086 * Get the host CPU identifiers, make sure they are valid and that
1087 * we've got a TSC delta for the CPU.
1088 */
1089 RTCPUID idHostCpu;
1090 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1091 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1092 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1093 {
1094 pVCpu->iHostCpuSet = iHostCpuSet;
1095 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1096
1097 /*
1098 * Update the periodic preemption timer if it's active.
1099 */
1100 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1101 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1102
1103#ifdef LOG_ENABLED
1104 /*
1105 * Ugly: Lazy registration of ring 0 loggers.
1106 */
1107 if (pVCpu->idCpu > 0)
1108 {
1109 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
1110 if ( pR0Logger
1111 && RT_UNLIKELY(!pR0Logger->fRegistered))
1112 {
1113 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
1114 pR0Logger->fRegistered = true;
1115 }
1116 }
1117#endif
1118
1119 int rc;
1120 bool fPreemptRestored = false;
1121 if (!HMR0SuspendPending())
1122 {
1123 /*
1124 * Enable the context switching hook.
1125 */
1126 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1127 {
1128 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1129 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1130 }
1131
1132 /*
1133 * Enter HM context.
1134 */
1135 rc = HMR0Enter(pVM, pVCpu);
1136 if (RT_SUCCESS(rc))
1137 {
1138 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1139
1140 /*
1141 * When preemption hooks are in place, enable preemption now that
1142 * we're in HM context.
1143 */
1144 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1145 {
1146 fPreemptRestored = true;
1147 RTThreadPreemptRestore(&PreemptState);
1148 }
1149
1150 /*
1151 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1152 */
1153 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1154
1155 /*
 1156 * Assert sanity on the way out. Using manual assertion code here as normal
1157 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1158 */
1159 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1160 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1161 {
1162 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1163 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1164 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1165 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1166 }
1167 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1168 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1169 {
1170 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1171 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1172 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1173 rc = VERR_INVALID_STATE;
1174 }
1175
1176 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1177 }
1178 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1179
1180 /*
1181 * Invalidate the host CPU identifiers before we disable the context
1182 * hook / restore preemption.
1183 */
1184 pVCpu->iHostCpuSet = UINT32_MAX;
1185 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1186
1187 /*
1188 * Disable context hooks. Due to unresolved cleanup issues, we
1189 * cannot leave the hooks enabled when we return to ring-3.
1190 *
1191 * Note! At the moment HM may also have disabled the hook
1192 * when we get here, but the IPRT API handles that.
1193 */
1194 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1195 {
1196 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1197 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1198 }
1199 }
1200 /*
1201 * The system is about to go into suspend mode; go back to ring 3.
1202 */
1203 else
1204 {
1205 rc = VINF_EM_RAW_INTERRUPT;
1206 pVCpu->iHostCpuSet = UINT32_MAX;
1207 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1208 }
1209
1210 /** @todo When HM stops messing with the context hook state, we'll disable
1211 * preemption again before the RTThreadCtxHookDisable call. */
1212 if (!fPreemptRestored)
1213 RTThreadPreemptRestore(&PreemptState);
1214
1215 pVCpu->vmm.s.iLastGZRc = rc;
1216
1217 /* Fire dtrace probe and collect statistics. */
1218 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1219#ifdef VBOX_WITH_STATISTICS
1220 vmmR0RecordRC(pVM, pVCpu, rc);
1221#endif
1222 }
1223 /*
1224 * Invalid CPU set index or TSC delta in need of measuring.
1225 */
1226 else
1227 {
1228 pVCpu->iHostCpuSet = UINT32_MAX;
1229 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1230 RTThreadPreemptRestore(&PreemptState);
1231 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1232 {
1233 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1234 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1235 0 /*default cTries*/);
1236 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1237 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1238 else
1239 pVCpu->vmm.s.iLastGZRc = rc;
1240 }
1241 else
1242 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1243 }
1244 break;
1245 }
1246
1247 /*
1248 * For profiling.
1249 */
1250 case VMMR0_DO_NOP:
1251 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1252 break;
1253
1254 /*
1255 * Impossible.
1256 */
1257 default:
1258 AssertMsgFailed(("%#x\n", enmOperation));
1259 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1260 break;
1261 }
1262}
1263
1264
1265/**
1266 * Validates a session or VM session argument.
1267 *
1268 * @returns true / false accordingly.
1269 * @param pVM Pointer to the VM.
1270 * @param pSession The session argument.
1271 */
1272DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1273{
1274 /* This must be set! */
1275 if (!pSession)
1276 return false;
1277
1278 /* Only one out of the two. */
1279 if (pVM && pClaimedSession)
1280 return false;
1281 if (pVM)
1282 pClaimedSession = pVM->pSession;
1283 return pClaimedSession == pSession;
1284}
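/* Usage sketch: the request cases in vmmR0EntryExWorker below validate their
   session argument with this helper, e.g. the VMMR0_DO_INTNET_OPEN case:

       PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
       if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
           return VERR_INVALID_PARAMETER;
       return IntNetR0OpenReq(pSession, pReq);
*/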
1285
1286
1287/**
 1288 * VMMR0EntryEx worker function, either called directly or, whenever possible,
 1289 * called through a longjmp so we can exit safely on failure.
1290 *
1291 * @returns VBox status code.
1292 * @param pVM Pointer to the VM.
1293 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 1294 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1295 * @param enmOperation Which operation to execute.
1296 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1297 * The support driver validates this if it's present.
1298 * @param u64Arg Some simple constant argument.
1299 * @param pSession The session of the caller.
1300 * @remarks Assume called with interrupts _enabled_.
1301 */
1302static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1303{
1304 /*
1305 * Common VM pointer validation.
1306 */
1307 if (pVM)
1308 {
1309 if (RT_UNLIKELY( !VALID_PTR(pVM)
1310 || ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
1311 {
1312 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
1313 return VERR_INVALID_POINTER;
1314 }
1315 if (RT_UNLIKELY( pVM->enmVMState < VMSTATE_CREATING
1316 || pVM->enmVMState > VMSTATE_TERMINATED
1317 || pVM->pVMR0 != pVM))
1318 {
1319 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
1320 pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
1321 return VERR_INVALID_POINTER;
1322 }
1323
1324 if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
1325 {
1326 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
1327 return VERR_INVALID_PARAMETER;
1328 }
1329 }
1330 else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
1331 {
1332 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1333 return VERR_INVALID_PARAMETER;
1334 }
1335
1336
1337 switch (enmOperation)
1338 {
1339 /*
1340 * GVM requests
1341 */
1342 case VMMR0_DO_GVMM_CREATE_VM:
1343 if (pVM || u64Arg || idCpu != NIL_VMCPUID)
1344 return VERR_INVALID_PARAMETER;
1345 return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);
1346
1347 case VMMR0_DO_GVMM_DESTROY_VM:
1348 if (pReqHdr || u64Arg)
1349 return VERR_INVALID_PARAMETER;
1350 return GVMMR0DestroyVM(pVM);
1351
1352 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1353 {
1354 if (!pVM)
1355 return VERR_INVALID_PARAMETER;
1356 return GVMMR0RegisterVCpu(pVM, idCpu);
1357 }
1358
1359 case VMMR0_DO_GVMM_SCHED_HALT:
1360 if (pReqHdr)
1361 return VERR_INVALID_PARAMETER;
1362 return GVMMR0SchedHalt(pVM, idCpu, u64Arg);
1363
1364 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1365 if (pReqHdr || u64Arg)
1366 return VERR_INVALID_PARAMETER;
1367 return GVMMR0SchedWakeUp(pVM, idCpu);
1368
1369 case VMMR0_DO_GVMM_SCHED_POKE:
1370 if (pReqHdr || u64Arg)
1371 return VERR_INVALID_PARAMETER;
1372 return GVMMR0SchedPoke(pVM, idCpu);
1373
1374 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1375 if (u64Arg)
1376 return VERR_INVALID_PARAMETER;
1377 return GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1378
1379 case VMMR0_DO_GVMM_SCHED_POLL:
1380 if (pReqHdr || u64Arg > 1)
1381 return VERR_INVALID_PARAMETER;
1382 return GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);
1383
1384 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1385 if (u64Arg)
1386 return VERR_INVALID_PARAMETER;
1387 return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);
1388
1389 case VMMR0_DO_GVMM_RESET_STATISTICS:
1390 if (u64Arg)
1391 return VERR_INVALID_PARAMETER;
1392 return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);
1393
1394 /*
1395 * Initialize the R0 part of a VM instance.
1396 */
1397 case VMMR0_DO_VMMR0_INIT:
1398 return vmmR0InitVM(pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1399
1400 /*
1401 * Terminate the R0 part of a VM instance.
1402 */
1403 case VMMR0_DO_VMMR0_TERM:
1404 return VMMR0TermVM(pVM, NULL);
1405
1406 /*
 1407 * Attempt to enable HM mode and check the current setting.
1408 */
1409 case VMMR0_DO_HM_ENABLE:
1410 return HMR0EnableAllCpus(pVM);
1411
1412 /*
1413 * Setup the hardware accelerated session.
1414 */
1415 case VMMR0_DO_HM_SETUP_VM:
1416 return HMR0SetupVM(pVM);
1417
1418 /*
1419 * Switch to RC to execute Hypervisor function.
1420 */
1421 case VMMR0_DO_CALL_HYPERVISOR:
1422 {
1423#ifdef VBOX_WITH_RAW_MODE
1424 /*
1425 * Validate input / context.
1426 */
1427 if (RT_UNLIKELY(idCpu != 0))
1428 return VERR_INVALID_CPU_ID;
1429 if (RT_UNLIKELY(pVM->cCpus != 1))
1430 return VERR_INVALID_PARAMETER;
1431 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1432# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1433 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1434 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1435# endif
1436
1437 /*
1438 * Disable interrupts.
1439 */
1440 RTCCUINTREG fFlags = ASMIntDisableFlags();
1441
1442 /*
1443 * Get the host CPU identifiers, make sure they are valid and that
1444 * we've got a TSC delta for the CPU.
1445 */
1446 RTCPUID idHostCpu;
1447 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1448 if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
1449 {
1450 ASMSetFlags(fFlags);
1451 return VERR_INVALID_CPU_INDEX;
1452 }
1453 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1454 {
1455 ASMSetFlags(fFlags);
1456 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1457 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1458 0 /*default cTries*/);
1459 if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
1460 return rc;
1461 }
1462
1463 /*
1464 * Commit the CPU identifiers.
1465 */
1466# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1467 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1468# endif
1469 pVCpu->iHostCpuSet = iHostCpuSet;
1470 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1471
1472 /*
1473 * We might need to disable VT-x if the active switcher turns off paging.
1474 */
1475 bool fVTxDisabled;
1476 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1477 if (RT_SUCCESS(rc))
1478 {
1479 /*
1480 * Go through the wormhole...
1481 */
1482 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1483
1484 /*
1485 * Re-enable VT-x before we dispatch any pending host interrupts.
1486 */
1487 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1488
1489 if ( rc == VINF_EM_RAW_INTERRUPT
1490 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1491 TRPMR0DispatchHostInterrupt(pVM);
1492 }
1493
1494 /*
1495 * Invalidate the host CPU identifiers as we restore interrupts.
1496 */
1497 pVCpu->iHostCpuSet = UINT32_MAX;
1498 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1499 ASMSetFlags(fFlags);
1500 return rc;
1501
1502#else /* !VBOX_WITH_RAW_MODE */
1503 return VERR_RAW_MODE_NOT_SUPPORTED;
1504#endif
1505 }
1506
1507 /*
1508 * PGM wrappers.
1509 */
1510 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1511 if (idCpu == NIL_VMCPUID)
1512 return VERR_INVALID_CPU_ID;
1513 return PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);
1514
1515 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1516 if (idCpu == NIL_VMCPUID)
1517 return VERR_INVALID_CPU_ID;
1518 return PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu]);
1519
1520 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1521 if (idCpu == NIL_VMCPUID)
1522 return VERR_INVALID_CPU_ID;
1523 return PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);
1524
1525 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1526 if (idCpu != 0)
1527 return VERR_INVALID_CPU_ID;
1528 return PGMR0PhysSetupIommu(pVM);
1529
1530 /*
1531 * GMM wrappers.
1532 */
1533 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1534 if (u64Arg)
1535 return VERR_INVALID_PARAMETER;
1536 return GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1537
1538 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1539 if (u64Arg)
1540 return VERR_INVALID_PARAMETER;
1541 return GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1542
1543 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1544 if (u64Arg)
1545 return VERR_INVALID_PARAMETER;
1546 return GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1547
1548 case VMMR0_DO_GMM_FREE_PAGES:
1549 if (u64Arg)
1550 return VERR_INVALID_PARAMETER;
1551 return GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1552
1553 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1554 if (u64Arg)
1555 return VERR_INVALID_PARAMETER;
1556 return GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1557
1558 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1559 if (u64Arg)
1560 return VERR_INVALID_PARAMETER;
1561 return GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);
1562
1563 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1564 if (idCpu == NIL_VMCPUID)
1565 return VERR_INVALID_CPU_ID;
1566 if (u64Arg)
1567 return VERR_INVALID_PARAMETER;
1568 return GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1569
1570 case VMMR0_DO_GMM_BALLOONED_PAGES:
1571 if (u64Arg)
1572 return VERR_INVALID_PARAMETER;
1573 return GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1574
1575 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1576 if (u64Arg)
1577 return VERR_INVALID_PARAMETER;
1578 return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1579
1580 case VMMR0_DO_GMM_SEED_CHUNK:
1581 if (pReqHdr)
1582 return VERR_INVALID_PARAMETER;
1583 return GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);
1584
1585 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1586 if (idCpu == NIL_VMCPUID)
1587 return VERR_INVALID_CPU_ID;
1588 if (u64Arg)
1589 return VERR_INVALID_PARAMETER;
1590 return GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1591
1592 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1593 if (idCpu == NIL_VMCPUID)
1594 return VERR_INVALID_CPU_ID;
1595 if (u64Arg)
1596 return VERR_INVALID_PARAMETER;
1597 return GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1598
1599 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1600 if (idCpu == NIL_VMCPUID)
1601 return VERR_INVALID_CPU_ID;
1602 if ( u64Arg
1603 || pReqHdr)
1604 return VERR_INVALID_PARAMETER;
1605 return GMMR0ResetSharedModules(pVM, idCpu);
1606
1607#ifdef VBOX_WITH_PAGE_SHARING
1608 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1609 {
1610 if (idCpu == NIL_VMCPUID)
1611 return VERR_INVALID_CPU_ID;
1612 if ( u64Arg
1613 || pReqHdr)
1614 return VERR_INVALID_PARAMETER;
1615
1616 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1617 Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());
1618
1619# ifdef DEBUG_sandervl
1620 /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
1621 /* Todo: this can have bad side effects for unexpected jumps back to r3. */
1622 int rc = GMMR0CheckSharedModulesStart(pVM);
1623 if (rc == VINF_SUCCESS)
1624 {
1625 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
1626 Assert( rc == VINF_SUCCESS
1627 || (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
1628 GMMR0CheckSharedModulesEnd(pVM);
1629 }
1630# else
1631 int rc = GMMR0CheckSharedModules(pVM, pVCpu);
1632# endif
1633 return rc;
1634 }
1635#endif
1636
1637#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1638 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1639 if (u64Arg)
1640 return VERR_INVALID_PARAMETER;
1641 return GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1642#endif
1643
1644 case VMMR0_DO_GMM_QUERY_STATISTICS:
1645 if (u64Arg)
1646 return VERR_INVALID_PARAMETER;
1647 return GMMR0QueryStatisticsReq(pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1648
1649 case VMMR0_DO_GMM_RESET_STATISTICS:
1650 if (u64Arg)
1651 return VERR_INVALID_PARAMETER;
1652 return GMMR0ResetStatisticsReq(pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1653
1654 /*
1655 * A quick GCFGM mock-up.
1656 */
1657 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1658 case VMMR0_DO_GCFGM_SET_VALUE:
1659 case VMMR0_DO_GCFGM_QUERY_VALUE:
1660 {
1661 if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1662 return VERR_INVALID_PARAMETER;
1663 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1664 if (pReq->Hdr.cbReq != sizeof(*pReq))
1665 return VERR_INVALID_PARAMETER;
1666 int rc;
1667 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1668 {
1669 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1670 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1671 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1672 }
1673 else
1674 {
1675 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1676 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1677 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1678 }
1679 return rc;
1680 }
1681
1682 /*
1683 * PDM Wrappers.
1684 */
1685 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1686 {
1687 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1688 return VERR_INVALID_PARAMETER;
1689 return PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1690 }
1691
1692 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1693 {
1694 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1695 return VERR_INVALID_PARAMETER;
1696 return PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1697 }
1698
1699 /*
1700 * Requests to the internal networking service.
1701 */
1702 case VMMR0_DO_INTNET_OPEN:
1703 {
1704 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
1705 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
1706 return VERR_INVALID_PARAMETER;
1707 return IntNetR0OpenReq(pSession, pReq);
1708 }
1709
1710 case VMMR0_DO_INTNET_IF_CLOSE:
1711 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1712 return VERR_INVALID_PARAMETER;
1713 return IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
1714
1715 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
1716 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1717 return VERR_INVALID_PARAMETER;
1718 return IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
1719
1720 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
1721 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1722 return VERR_INVALID_PARAMETER;
1723 return IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
1724
1725 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
1726 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1727 return VERR_INVALID_PARAMETER;
1728 return IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
1729
1730 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
1731 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1732 return VERR_INVALID_PARAMETER;
1733 return IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
1734
1735 case VMMR0_DO_INTNET_IF_SEND:
1736 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1737 return VERR_INVALID_PARAMETER;
1738 return IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
1739
1740 case VMMR0_DO_INTNET_IF_WAIT:
1741 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1742 return VERR_INVALID_PARAMETER;
1743 return IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
1744
1745 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
1746 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1747 return VERR_INVALID_PARAMETER;
1748 return IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
1749
1750#ifdef VBOX_WITH_PCI_PASSTHROUGH
1751 /*
1752 * Requests to host PCI driver service.
1753 */
1754 case VMMR0_DO_PCIRAW_REQ:
1755 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1756 return VERR_INVALID_PARAMETER;
1757 return PciRawR0ProcessReq(pSession, pVM, (PPCIRAWSENDREQ)pReqHdr);
1758#endif
1759 /*
1760 * For profiling.
1761 */
1762 case VMMR0_DO_NOP:
1763 case VMMR0_DO_SLOW_NOP:
1764 return VINF_SUCCESS;
1765
1766 /*
1767 * For testing Ring-0 APIs invoked in this environment.
1768 */
1769 case VMMR0_DO_TESTS:
1770 /** @todo make new test */
1771 return VINF_SUCCESS;
1772
1773
1774#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1775 case VMMR0_DO_TEST_SWITCHER3264:
1776 if (idCpu == NIL_VMCPUID)
1777 return VERR_INVALID_CPU_ID;
1778 return HMR0TestSwitcher3264(pVM);
1779#endif
1780 default:
1781 /*
1782             * We're returning VERR_NOT_SUPPORTED here so we've got something other
1783             * than -1, which the interrupt gate glue code might return.
1784 */
1785 Log(("operation %#x is not supported\n", enmOperation));
1786 return VERR_NOT_SUPPORTED;
1787 }
1788}
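/*
 * Editor's note: the request cases in the dispatcher above all follow the
 * same validation shape before handing off to a worker - no unused u64Arg,
 * a request packet, a session in the packet that matches the caller's, and
 * no per-VCPU context.  The sketch below illustrates that shape only; the
 * DEMOREQ type, demoProcessReq() and demoHandleReq() are hypothetical and
 * not part of this file.
 */
typedef struct DEMOREQ
{
    SUPVMMR0REQHDR  Hdr;        /* common request header (size + magic) */
    PSUPDRVSESSION  pSession;   /* session of the submitter */
} DEMOREQ;
typedef DEMOREQ *PDEMOREQ;

static int demoProcessReq(PSUPDRVSESSION pSession, PDEMOREQ pReq)
{
    NOREF(pSession); NOREF(pReq);
    return VINF_SUCCESS;        /* stand-in for the real worker */
}

static int demoHandleReq(PVM pVM, VMCPUID idCpu, uint64_t u64Arg,
                         PSUPDRVSESSION pSession, PDEMOREQ pReq)
{
    if (   u64Arg                                               /* no immediate argument expected */
        || !pReq                                                /* the request packet is mandatory */
        || !vmmR0IsValidSession(pVM, pReq->pSession, pSession)  /* session must match the caller */
        || idCpu != NIL_VMCPUID)                                /* not a per-VCPU operation */
        return VERR_INVALID_PARAMETER;
    return demoProcessReq(pSession, pReq);
}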
1789
1790
1791/**
1792 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
1793 */
1794typedef struct VMMR0ENTRYEXARGS
1795{
1796 PVM pVM;
1797 VMCPUID idCpu;
1798 VMMR0OPERATION enmOperation;
1799 PSUPVMMR0REQHDR pReq;
1800 uint64_t u64Arg;
1801 PSUPDRVSESSION pSession;
1802} VMMR0ENTRYEXARGS;
1803/** Pointer to a vmmR0EntryExWrapper argument package. */
1804typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
1805
1806/**
1807 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
1808 *
1809 * @returns VBox status code.
1810 * @param   pvArgs      The argument package.
1811 */
1812static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
1813{
1814 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
1815 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
1816 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
1817 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
1818 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
1819 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
1820}
1821
1822
1823/**
1824 * The Ring 0 entry point, called by the support library (SUP).
1825 *
1826 * @returns VBox status code.
1827 * @param pVM Pointer to the VM.
1828 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1829 *                      is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1830 * @param enmOperation Which operation to execute.
1831 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
1832 * @param u64Arg Some simple constant argument.
1833 * @param pSession The session of the caller.
1834 * @remarks Assume called with interrupts _enabled_.
1835 */
1836VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
1837{
1838 /*
1839 * Requests that should only happen on the EMT thread will be
1840 * wrapped in a setjmp so we can assert without causing trouble.
1841 */
1842 if ( VALID_PTR(pVM)
1843 && pVM->pVMR0
1844 && idCpu < pVM->cCpus)
1845 {
1846 switch (enmOperation)
1847 {
1848 /* These might/will be called before VMMR3Init. */
1849 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1850 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1851 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1852 case VMMR0_DO_GMM_FREE_PAGES:
1853 case VMMR0_DO_GMM_BALLOONED_PAGES:
1854            /* On the Mac we might not have a valid jmp buf, so check these as well. */
1855 case VMMR0_DO_VMMR0_INIT:
1856 case VMMR0_DO_VMMR0_TERM:
1857 {
1858 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1859
1860 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
1861 break;
1862
1863 /** @todo validate this EMT claim... GVM knows. */
1864 VMMR0ENTRYEXARGS Args;
1865 Args.pVM = pVM;
1866 Args.idCpu = idCpu;
1867 Args.enmOperation = enmOperation;
1868 Args.pReq = pReq;
1869 Args.u64Arg = u64Arg;
1870 Args.pSession = pSession;
1871 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
1872 }
1873
1874 default:
1875 break;
1876 }
1877 }
1878 return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
1879}
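/*
 * Editor's note: a minimal, self-contained sketch of the setjmp-wrapper
 * technique VMMR0EntryEx uses for EMT-only operations.  It relies on the
 * standard C setjmp/longjmp pair purely for illustration; the real
 * vmmR0CallRing3SetJmpEx machinery is hand-written assembly that also
 * saves the stack so ring-0 execution can be resumed later.  All demo*
 * names below are hypothetical.
 */
#include <setjmp.h>

typedef struct DEMOARGS { int iOperation; } DEMOARGS;

static jmp_buf g_DemoJmpBuf;            /* stands in for CallRing3JmpBufR0 */

static int demoWorker(void *pvArgs)
{
    DEMOARGS *pArgs = (DEMOARGS *)pvArgs;
    if (pArgs->iOperation < 0)
        longjmp(g_DemoJmpBuf, 1);       /* an "assertion" bails straight out */
    return 0;                           /* normal completion */
}

static int demoEntry(DEMOARGS *pArgs)
{
    if (setjmp(g_DemoJmpBuf) == 0)      /* direct return: the buffer is now armed */
        return demoWorker(pArgs);       /* normal path through the worker */
    return -1;                          /* we got back here via longjmp */
}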
1880
1881
1882/**
1883 * Checks whether we've armed the ring-0 long jump machinery.
1884 *
1885 * @returns @c true / @c false
1886 * @param pVCpu Pointer to the VMCPU.
1887 * @thread EMT
1888 * @sa VMMIsLongJumpArmed
1889 */
1890VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
1891{
1892#ifdef RT_ARCH_X86
1893 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
1894 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
1895#else
1896 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
1897 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
1898#endif
1899}
1900
1901
1902/**
1903 * Checks whether we've done a ring-3 long jump.
1904 *
1905 * @returns @c true / @c false
1906 * @param pVCpu Pointer to the VMCPU.
1907 * @thread EMT
1908 */
1909VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
1910{
1911 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
1912}
1913
1914
1915/**
1916 * Internal R0 logger worker: Flush logger.
1917 *
1918 * @param pLogger The logger instance to flush.
1919 * @remark This function must be exported!
1920 */
1921VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
1922{
1923#ifdef LOG_ENABLED
1924 /*
1925 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
1926     * (This code is a bit paranoid.)
1927 */
1928 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
1929 if ( !VALID_PTR(pR0Logger)
1930 || !VALID_PTR(pR0Logger + 1)
1931 || pLogger->u32Magic != RTLOGGER_MAGIC)
1932 {
1933# ifdef DEBUG
1934 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
1935# endif
1936 return;
1937 }
1938 if (pR0Logger->fFlushingDisabled)
1939 return; /* quietly */
1940
1941 PVM pVM = pR0Logger->pVM;
1942 if ( !VALID_PTR(pVM)
1943 || pVM->pVMR0 != pVM)
1944 {
1945# ifdef DEBUG
1946 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
1947# endif
1948 return;
1949 }
1950
1951 PVMCPU pVCpu = VMMGetCpu(pVM);
1952 if (pVCpu)
1953 {
1954 /*
1955 * Check that the jump buffer is armed.
1956 */
1957# ifdef RT_ARCH_X86
1958 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
1959 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
1960# else
1961 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
1962 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
1963# endif
1964 {
1965# ifdef DEBUG
1966 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
1967# endif
1968 return;
1969 }
1970 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
1971 }
1972# ifdef DEBUG
1973 else
1974 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
1975# endif
1976#endif
1977}
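/*
 * Editor's note: the pointer arithmetic at the top of vmmR0LoggerFlush is
 * the classic "container of" pattern - recovering the enclosing structure
 * from a pointer to one of its embedded members.  Below is a hedged,
 * stand-alone sketch using the standard offsetof macro (RT_OFFSETOF is
 * IPRT's equivalent); the DEMO* names are made up for illustration.
 */
#include <stddef.h>

typedef struct DEMOINNER { int iDummy; } DEMOINNER;
typedef struct DEMOOUTER
{
    unsigned    uMagic;
    DEMOINNER   Inner;              /* embedded member, like VMMR0LOGGER::Logger */
} DEMOOUTER;

static DEMOOUTER *demoOuterFromInner(DEMOINNER *pInner)
{
    /* Step back from the member to the start of the containing structure. */
    return (DEMOOUTER *)((char *)pInner - offsetof(DEMOOUTER, Inner));
}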
1978
1979/**
1980 * Internal R0 logger worker: Custom prefix.
1981 *
1982 * @returns Number of chars written.
1983 *
1984 * @param pLogger The logger instance.
1985 * @param pchBuf The output buffer.
1986 * @param cchBuf The size of the buffer.
1987 * @param pvUser User argument (ignored).
1988 */
1989VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
1990{
1991 NOREF(pvUser);
1992#ifdef LOG_ENABLED
1993 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
1994 if ( !VALID_PTR(pR0Logger)
1995 || !VALID_PTR(pR0Logger + 1)
1996 || pLogger->u32Magic != RTLOGGER_MAGIC
1997 || cchBuf < 2)
1998 return 0;
1999
2000 static const char s_szHex[17] = "0123456789abcdef";
2001 VMCPUID const idCpu = pR0Logger->idCpu;
2002 pchBuf[1] = s_szHex[ idCpu & 15];
2003 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
2004
2005 return 2;
2006#else
2007 return 0;
2008#endif
2009}
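/*
 * Editor's note: a worked example of the prefix logic above.  For
 * idCpu = 42 (0x2a) the low nibble (42 & 15 = 10) selects 'a' and the high
 * nibble ((42 >> 4) & 15 = 2) selects '2', so every log line from that
 * VCPU is prefixed with "2a".  A stand-alone sketch of the same lookup:
 */
#include <stdio.h>

static void demoPrefix(unsigned idCpu, char achPrefix[2])
{
    static const char s_szHex[17] = "0123456789abcdef";
    achPrefix[1] = s_szHex[ idCpu       & 15];   /* low nibble */
    achPrefix[0] = s_szHex[(idCpu >> 4) & 15];   /* high nibble */
}

int main(void)
{
    char achPrefix[2];
    demoPrefix(42, achPrefix);
    printf("%.2s\n", achPrefix);                 /* prints "2a" */
    return 0;
}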
2010
2011#ifdef LOG_ENABLED
2012
2013/**
2014 * Disables flushing of the ring-0 debug log.
2015 *
2016 * @param pVCpu Pointer to the VMCPU.
2017 */
2018VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
2019{
2020 if (pVCpu->vmm.s.pR0LoggerR0)
2021 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2022}
2023
2024
2025/**
2026 * Enables flushing of the ring-0 debug log.
2027 *
2028 * @param pVCpu Pointer to the VMCPU.
2029 */
2030VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
2031{
2032 if (pVCpu->vmm.s.pR0LoggerR0)
2033 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2034}
2035
2036
2037/**
2038 * Checks whether log flushing is disabled; returns true if it is (or if no ring-0 logger is present).
2039 *
2040 * @param pVCpu Pointer to the VMCPU.
2041 */
2042VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
2043{
2044 if (pVCpu->vmm.s.pR0LoggerR0)
2045 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2046 return true;
2047}
2048#endif /* LOG_ENABLED */
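/*
 * Editor's note: a hedged usage sketch for the pair above.  Flushing is
 * typically disabled around ring-0 code that must not call back into
 * ring-3 (vmmR0LoggerFlush returns quietly while the flag is set) and
 * re-enabled afterwards.  demoLogSensitiveSection() is hypothetical.
 */
#ifdef LOG_ENABLED
static void demoLogSensitiveSection(PVMCPU pVCpu)
{
    VMMR0LogFlushDisable(pVCpu);
    /* ... work during which a call-back to ring-3 is not possible ... */
    VMMR0LogFlushEnable(pVCpu);
}
#endif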
2049
2050/**
2051 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2052 *
2053 * @returns true if the breakpoint should be hit, false if it should be ignored.
2054 */
2055DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2056{
2057#if 0
2058 return true;
2059#else
2060 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2061 if (pVM)
2062 {
2063 PVMCPU pVCpu = VMMGetCpu(pVM);
2064
2065 if (pVCpu)
2066 {
2067#ifdef RT_ARCH_X86
2068 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2069 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2070#else
2071 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2072 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2073#endif
2074 {
2075 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2076 return RT_FAILURE_NP(rc);
2077 }
2078 }
2079 }
2080#ifdef RT_OS_LINUX
2081 return true;
2082#else
2083 return false;
2084#endif
2085#endif
2086}
2087
2088
2089/**
2090 * Override this so we can push it up to ring-3.
2091 *
2092 * @param pszExpr Expression. Can be NULL.
2093 * @param uLine Location line number.
2094 * @param pszFile Location file name.
2095 * @param pszFunction Location function name.
2096 */
2097DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2098{
2099 /*
2100 * To the log.
2101 */
2102 LogAlways(("\n!!R0-Assertion Failed!!\n"
2103 "Expression: %s\n"
2104 "Location : %s(%d) %s\n",
2105 pszExpr, pszFile, uLine, pszFunction));
2106
2107 /*
2108 * To the global VMM buffer.
2109 */
2110 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2111 if (pVM)
2112 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2113 "\n!!R0-Assertion Failed!!\n"
2114 "Expression: %s\n"
2115 "Location : %s(%d) %s\n",
2116 pszExpr, pszFile, uLine, pszFunction);
2117
2118 /*
2119 * Continue the normal way.
2120 */
2121 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2122}
2123
2124
2125/**
2126 * Callback for RTLogFormatV which writes to the ring-3 log port.
2127 * See PFNLOGOUTPUT() for details.
2128 */
2129static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2130{
2131 for (size_t i = 0; i < cbChars; i++)
2132 LogAlways(("%c", pachChars[i]));
2133
2134 NOREF(pv);
2135 return cbChars;
2136}
2137
2138
2139/**
2140 * Override this so we can push it up to ring-3.
2141 *
2142 * @param pszFormat The format string.
2143 * @param va Arguments.
2144 */
2145DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2146{
2147 va_list vaCopy;
2148
2149 /*
2150 * Push the message to the loggers.
2151 */
2152 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2153 if (pLog)
2154 {
2155 va_copy(vaCopy, va);
2156 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2157 va_end(vaCopy);
2158 }
2159 pLog = RTLogRelGetDefaultInstance();
2160 if (pLog)
2161 {
2162 va_copy(vaCopy, va);
2163 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2164 va_end(vaCopy);
2165 }
2166
2167 /*
2168 * Push it to the global VMM buffer.
2169 */
2170 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2171 if (pVM)
2172 {
2173 va_copy(vaCopy, va);
2174 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2175 va_end(vaCopy);
2176 }
2177
2178 /*
2179 * Continue the normal way.
2180 */
2181 RTAssertMsg2V(pszFormat, va);
2182}
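/*
 * Editor's note: RTAssertMsg2WeakV above hands the same va_list to several
 * consumers; that only works because each consumer operates on a fresh
 * va_copy - a va_list may only be traversed once.  A stand-alone sketch of
 * the same pattern using vsnprintf (demoBroadcast() is hypothetical):
 */
#include <stdarg.h>
#include <stdio.h>

static void demoBroadcast(char *pszBufA, char *pszBufB, size_t cbBuf, const char *pszFormat, ...)
{
    va_list va;
    va_list vaCopy;

    va_start(va, pszFormat);

    va_copy(vaCopy, va);                            /* first consumer gets its own copy */
    vsnprintf(pszBufA, cbBuf, pszFormat, vaCopy);
    va_end(vaCopy);

    va_copy(vaCopy, va);                            /* so does the second */
    vsnprintf(pszBufB, cbBuf, pszFormat, vaCopy);
    va_end(vaCopy);

    va_end(va);
}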
2183