VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@60364

Last change on this file since 60364 was 60364, checked in by vboxsync, 9 years ago

VMM: APIC R0 init/term ordering and nits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 81.3 KB
1/* $Id: VMMR0.cpp 60364 2016-04-06 16:08:15Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/stam.h>
30#include <VBox/vmm/tm.h>
31#include "VMMInternal.h"
32#include <VBox/vmm/vm.h>
33#ifdef VBOX_WITH_PCI_PASSTHROUGH
34# include <VBox/vmm/pdmpci.h>
35#endif
36#ifdef VBOX_WITH_NEW_APIC
37# include <VBox/vmm/apic.h>
38#endif
39
40#include <VBox/vmm/gvmm.h>
41#include <VBox/vmm/gmm.h>
42#include <VBox/vmm/gim.h>
43#include <VBox/intnet.h>
44#include <VBox/vmm/hm.h>
45#include <VBox/param.h>
46#include <VBox/err.h>
47#include <VBox/version.h>
48#include <VBox/log.h>
49
50#include <iprt/asm-amd64-x86.h>
51#include <iprt/assert.h>
52#include <iprt/crc.h>
53#include <iprt/mp.h>
54#include <iprt/once.h>
55#include <iprt/stdarg.h>
56#include <iprt/string.h>
57#include <iprt/thread.h>
58#include <iprt/timer.h>
59
60#include "dtrace/VBoxVMM.h"
61
62
63#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
64# pragma intrinsic(_AddressOfReturnAddress)
65#endif
66
67#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
68# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
69#endif
70
71
72
73/*********************************************************************************************************************************
74* Defined Constants And Macros *
75*********************************************************************************************************************************/
76/** @def VMM_CHECK_SMAP_SETUP
77 * SMAP check setup. */
78/** @def VMM_CHECK_SMAP_CHECK
79 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
80 * it will be logged and @a a_BadExpr is executed. */
81/** @def VMM_CHECK_SMAP_CHECK2
82 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
83 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
84 * executed. */
85#if defined(VBOX_STRICT) || 1
86# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
87# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
88 do { \
89 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
90 { \
91 RTCCUINTREG fEflCheck = ASMGetFlags(); \
92 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
93 { /* likely */ } \
94 else \
95 { \
96 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
97 a_BadExpr; \
98 } \
99 } \
100 } while (0)
101# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
102 do { \
103 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
104 { \
105 RTCCUINTREG fEflCheck = ASMGetFlags(); \
106 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
107 { /* likely */ } \
108 else \
109 { \
110 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
111 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
112 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
113 a_BadExpr; \
114 } \
115 } \
116 } while (0)
117#else
118# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
119# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
120# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
121#endif
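/* Illustrative sketch (not part of the build): the typical pattern for using the
   SMAP check macros in a ring-0 entry point, mirroring how they are used further
   down in this file. The function name vmmR0ExampleEntry is hypothetical.
   @code
   static int vmmR0ExampleEntry(PVM pVM)
   {
       VMM_CHECK_SMAP_SETUP();                                         // reads the kernel feature flags once
       VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);        // log and bail if EFLAGS.AC is clear
       // ... do work ...
       VMM_CHECK_SMAP_CHECK2(pVM, return VERR_VMM_SMAP_BUT_AC_CLEAR);  // also writes the VM assertion text buffer
       return VINF_SUCCESS;
   }
   @endcode */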
122
123
124/*********************************************************************************************************************************
125* Internal Functions *
126*********************************************************************************************************************************/
127RT_C_DECLS_BEGIN
128#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
129extern uint64_t __udivdi3(uint64_t, uint64_t);
130extern uint64_t __umoddi3(uint64_t, uint64_t);
131#endif
132RT_C_DECLS_END
133
134
135/*********************************************************************************************************************************
136* Global Variables *
137*********************************************************************************************************************************/
138/** Drag in necessary library bits.
139 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
140PFNRT g_VMMR0Deps[] =
141{
142 (PFNRT)RTCrc32,
143 (PFNRT)RTOnce,
144#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
145 (PFNRT)__udivdi3,
146 (PFNRT)__umoddi3,
147#endif
148 NULL
149};
150
151#ifdef RT_OS_SOLARIS
152/* Dependency information for the native solaris loader. */
153extern "C" { char _depends_on[] = "vboxdrv"; }
154#endif
155
156
157
158/**
159 * Initialize the module.
160 * This is called when we're first loaded.
161 *
162 * @returns 0 on success.
163 * @returns VBox status on failure.
164 * @param hMod Image handle for use in APIs.
165 */
166DECLEXPORT(int) ModuleInit(void *hMod)
167{
168 VMM_CHECK_SMAP_SETUP();
169 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
170
171#ifdef VBOX_WITH_DTRACE_R0
172 /*
173 * The first thing to do is register the static tracepoints.
174 * (Deregistration is automatic.)
175 */
176 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
177 if (RT_FAILURE(rc2))
178 return rc2;
179#endif
180 LogFlow(("ModuleInit:\n"));
181
182#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
183 /*
184 * Display the CMOS debug code.
185 */
186 ASMOutU8(0x72, 0x03);
187 uint8_t bDebugCode = ASMInU8(0x73);
188 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
189 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
190#endif
191
192 /*
193 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
194 */
195 int rc = vmmInitFormatTypes();
196 if (RT_SUCCESS(rc))
197 {
198 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
199 rc = GVMMR0Init();
200 if (RT_SUCCESS(rc))
201 {
202 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
203 rc = GMMR0Init();
204 if (RT_SUCCESS(rc))
205 {
206 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
207 rc = HMR0Init();
208 if (RT_SUCCESS(rc))
209 {
210 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
211 rc = PGMRegisterStringFormatTypes();
212 if (RT_SUCCESS(rc))
213 {
214 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
215#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
216 rc = PGMR0DynMapInit();
217#endif
218 if (RT_SUCCESS(rc))
219 {
220 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
221 rc = IntNetR0Init();
222 if (RT_SUCCESS(rc))
223 {
224#ifdef VBOX_WITH_PCI_PASSTHROUGH
225 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
226 rc = PciRawR0Init();
227#endif
228 if (RT_SUCCESS(rc))
229 {
230 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
231 rc = CPUMR0ModuleInit();
232 if (RT_SUCCESS(rc))
233 {
234#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
235 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
236 rc = vmmR0TripleFaultHackInit();
237 if (RT_SUCCESS(rc))
238#endif
239 {
240 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
241 if (RT_SUCCESS(rc))
242 {
243 LogFlow(("ModuleInit: returns success.\n"));
244 return VINF_SUCCESS;
245 }
246 }
247
248 /*
249 * Bail out.
250 */
251#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
252 vmmR0TripleFaultHackTerm();
253#endif
254 }
255 else
256 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
257#ifdef VBOX_WITH_PCI_PASSTHROUGH
258 PciRawR0Term();
259#endif
260 }
261 else
262 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
263 IntNetR0Term();
264 }
265 else
266 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
267#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
268 PGMR0DynMapTerm();
269#endif
270 }
271 else
272 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
273 PGMDeregisterStringFormatTypes();
274 }
275 else
276 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
277 HMR0Term();
278 }
279 else
280 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
281 GMMR0Term();
282 }
283 else
284 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
285 GVMMR0Term();
286 }
287 else
288 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
289 vmmTermFormatTypes();
290 }
291 else
292 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
293
294 LogFlow(("ModuleInit: failed %Rrc\n", rc));
295 return rc;
296}
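/* Illustrative sketch (not part of the build): the error-handling idiom used by
   ModuleInit above - each subsystem is initialized in a nested RT_SUCCESS() block
   and torn down in reverse order on the failure paths. SubsysAInit/SubsysBInit and
   SubsysATerm are hypothetical placeholders.
   @code
   int rc = SubsysAInit();
   if (RT_SUCCESS(rc))
   {
       rc = SubsysBInit();
       if (RT_SUCCESS(rc))
           return VINF_SUCCESS;      // everything up, nothing to undo
       SubsysATerm();                // B failed: undo A before reporting the error
   }
   return rc;
   @endcode */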
297
298
299/**
300 * Terminate the module.
301 * This is called when we're finally unloaded.
302 *
303 * @param hMod Image handle for use in APIs.
304 */
305DECLEXPORT(void) ModuleTerm(void *hMod)
306{
307 NOREF(hMod);
308 LogFlow(("ModuleTerm:\n"));
309
310 /*
311 * Terminate the CPUM module (Local APIC cleanup).
312 */
313 CPUMR0ModuleTerm();
314
315 /*
316 * Terminate the internal network service.
317 */
318 IntNetR0Term();
319
320 /*
321 * PGM (Darwin), HM and PciRaw global cleanup.
322 */
323#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
324 PGMR0DynMapTerm();
325#endif
326#ifdef VBOX_WITH_PCI_PASSTHROUGH
327 PciRawR0Term();
328#endif
329 PGMDeregisterStringFormatTypes();
330 HMR0Term();
331#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
332 vmmR0TripleFaultHackTerm();
333#endif
334
335 /*
336 * Destroy the GMM and GVMM instances.
337 */
338 GMMR0Term();
339 GVMMR0Term();
340
341 vmmTermFormatTypes();
342
343 LogFlow(("ModuleTerm: returns\n"));
344}
345
346
347/**
348 * Initiates the R0 driver for a particular VM instance.
349 *
350 * @returns VBox status code.
351 *
352 * @param pVM The cross context VM structure.
353 * @param uSvnRev The SVN revision of the ring-3 part.
354 * @param uBuildType Build type indicator.
355 * @thread EMT.
356 */
357static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
358{
359 VMM_CHECK_SMAP_SETUP();
360 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
361
362 /*
363 * Match the SVN revisions and build type.
364 */
365 if (uSvnRev != VMMGetSvnRev())
366 {
367 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
368 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
369 return VERR_VMM_R0_VERSION_MISMATCH;
370 }
371 if (uBuildType != vmmGetBuildType())
372 {
373 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
374 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
375 return VERR_VMM_R0_VERSION_MISMATCH;
376 }
377 if ( !VALID_PTR(pVM)
378 || pVM->pVMR0 != pVM)
379 return VERR_INVALID_PARAMETER;
380
381
382#ifdef LOG_ENABLED
383 /*
384 * Register the EMT R0 logger instance for VCPU 0.
385 */
386 PVMCPU pVCpu = &pVM->aCpus[0];
387
388 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
389 if (pR0Logger)
390 {
391# if 0 /* testing of the logger. */
392 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
393 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
394 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
395 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
396
397 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
398 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
399 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
400 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
401
402 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
403 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
404 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
405 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
406
407 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
408 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
409 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
410 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
411 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
412 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
413
414 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
415 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
416
417 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
418 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
419 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
420# endif
421 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
422 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
423 pR0Logger->fRegistered = true;
424 }
425#endif /* LOG_ENABLED */
426
427 /*
428 * Check if the host supports high resolution timers or not.
429 */
430 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
431 && !RTTimerCanDoHighResolution())
432 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
433
434 /*
435 * Initialize the per VM data for GVMM and GMM.
436 */
437 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
438 int rc = GVMMR0InitVM(pVM);
439// if (RT_SUCCESS(rc))
440// rc = GMMR0InitPerVMData(pVM);
441 if (RT_SUCCESS(rc))
442 {
443 /*
444 * Init HM, CPUM and PGM (Darwin only).
445 */
446 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
447 rc = HMR0InitVM(pVM);
448 if (RT_SUCCESS(rc))
449 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */
450 if (RT_SUCCESS(rc))
451 {
452 rc = CPUMR0InitVM(pVM);
453 if (RT_SUCCESS(rc))
454 {
455 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
456#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
457 rc = PGMR0DynMapInitVM(pVM);
458#endif
459 if (RT_SUCCESS(rc))
460 {
461 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
462#ifdef VBOX_WITH_PCI_PASSTHROUGH
463 rc = PciRawR0InitVM(pVM);
464#endif
465 if (RT_SUCCESS(rc))
466 {
467 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
468 rc = GIMR0InitVM(pVM);
469 if (RT_SUCCESS(rc))
470 {
471#ifdef VBOX_WITH_NEW_APIC
472 rc = APICR0InitVM(pVM);
473#endif
474 if (RT_SUCCESS(rc))
475 {
476 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
477 if (RT_SUCCESS(rc))
478 {
479 GVMMR0DoneInitVM(pVM);
480 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
481 return rc;
482 }
483#ifdef VBOX_WITH_NEW_APIC
484 APICR0TermVM(pVM);
485#endif
486 }
487
488 /* Bail out. */
489 GIMR0TermVM(pVM);
490 }
491#ifdef VBOX_WITH_PCI_PASSTHROUGH
492 PciRawR0TermVM(pVM);
493#endif
494 }
495 }
496 }
497 HMR0TermVM(pVM);
498 }
499 }
500
501 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
502 return rc;
503}
504
505
506/**
507 * Terminates the R0 bits for a particular VM instance.
508 *
509 * This is normally called by ring-3 as part of the VM termination process, but
510 * may alternatively be called during the support driver session cleanup when
511 * the VM object is destroyed (see GVMM).
512 *
513 * @returns VBox status code.
514 *
515 * @param pVM The cross context VM structure.
516 * @param pGVM Pointer to the global VM structure. Optional.
517 * @thread EMT or session clean up thread.
518 */
519VMMR0_INT_DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
520{
521#ifdef VBOX_WITH_PCI_PASSTHROUGH
522 PciRawR0TermVM(pVM);
523#endif
524
525 /*
526 * Tell GVMM what we're up to and check that we only do this once.
527 */
528 if (GVMMR0DoingTermVM(pVM, pGVM))
529 {
530#ifdef VBOX_WITH_NEW_APIC
531 APICR0TermVM(pVM);
532#endif
533 GIMR0TermVM(pVM);
534
535 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
536 * here to make sure we don't leak any shared pages if we crash... */
537#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
538 PGMR0DynMapTermVM(pVM);
539#endif
540 HMR0TermVM(pVM);
541 }
542
543 /*
544 * Deregister the logger.
545 */
546 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
547 return VINF_SUCCESS;
548}
549
550
551/**
552 * VMM ring-0 thread-context callback.
553 *
554 * This does common HM state updating and calls the HM-specific thread-context
555 * callback.
556 *
557 * @param enmEvent The thread-context event.
558 * @param pvUser Opaque pointer to the VMCPU.
559 *
560 * @thread EMT(pvUser)
561 */
562static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
563{
564 PVMCPU pVCpu = (PVMCPU)pvUser;
565
566 switch (enmEvent)
567 {
568 case RTTHREADCTXEVENT_IN:
569 {
570 /*
571 * Linux may call us with preemption enabled (really!) but technically we
572 * cannot get preempted here, otherwise we end up in an infinite recursion
573 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
574 * ad infinitum). Let's just disable preemption for now...
575 */
576 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
577 * preemption after doing the callout (one or two functions up the
578 * call chain). */
579 /** @todo r=ramshankar: See @bugref{5313#c30}. */
580 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
581 RTThreadPreemptDisable(&ParanoidPreemptState);
582
583 /* We need to update the VCPU <-> host CPU mapping. */
584 RTCPUID idHostCpu;
585 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
586 pVCpu->iHostCpuSet = iHostCpuSet;
587 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
588
589 /* In the very unlikely event that the GIP delta for the CPU we're
590 rescheduled on needs calculating, try to force a return to ring-3.
591 We unfortunately cannot do the measurements right here. */
592 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
593 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
594
595 /* Invoke the HM-specific thread-context callback. */
596 HMR0ThreadCtxCallback(enmEvent, pvUser);
597
598 /* Restore preemption. */
599 RTThreadPreemptRestore(&ParanoidPreemptState);
600 break;
601 }
602
603 case RTTHREADCTXEVENT_OUT:
604 {
605 /* Invoke the HM-specific thread-context callback. */
606 HMR0ThreadCtxCallback(enmEvent, pvUser);
607
608 /*
609 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
610 * have the same host CPU associated with it.
611 */
612 pVCpu->iHostCpuSet = UINT32_MAX;
613 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
614 break;
615 }
616
617 default:
618 /* Invoke the HM-specific thread-context callback. */
619 HMR0ThreadCtxCallback(enmEvent, pvUser);
620 break;
621 }
622}
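/* Illustrative sketch (not part of the build): the "paranoid" preemption guard
   used in the RTTHREADCTXEVENT_IN case above. Anything between disable and
   restore runs without being rescheduled, which is what makes the VCPU <-> host
   CPU update safe. The surrounding function name is hypothetical.
   @code
   static void vmmR0ExampleNoPreempt(PVMCPU pVCpu)
   {
       RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
       RTThreadPreemptDisable(&PreemptState);           // no rescheduling from here on

       RTCPUID idHostCpu;
       pVCpu->iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
       ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu); // safe: we cannot migrate right now

       RTThreadPreemptRestore(&PreemptState);           // re-enable preemption
   }
   @endcode */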
623
624
625/**
626 * Creates thread switching hook for the current EMT thread.
627 *
628 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
629 * platform does not implement switcher hooks, no hooks will be created and
630 * the member will be set to NIL_RTTHREADCTXHOOK.
631 *
632 * @returns VBox status code.
633 * @param pVCpu The cross context virtual CPU structure.
634 * @thread EMT(pVCpu)
635 */
636VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
637{
638 VMCPU_ASSERT_EMT(pVCpu);
639 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
640
641 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
642 if (RT_SUCCESS(rc))
643 return rc;
644
645 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
646 if (rc == VERR_NOT_SUPPORTED)
647 return VINF_SUCCESS;
648
649 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
650 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
651}
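/* Illustrative sketch (not part of the build): the life cycle of the context hook
   created above, pieced together from its uses elsewhere in this file - enable it
   (with preemption disabled) before entering HM context, disable it before
   returning to ring-3, and destroy it when the EMT is torn down.
   @code
   if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
   {
       RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook);    // before HMR0Enter
       // ... run guest code ...
       RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);   // before going back to ring-3
   }
   RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);       // at EMT cleanup (VMMR0ThreadCtxHookDestroyForEmt)
   @endcode */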
652
653
654/**
655 * Destroys the thread switching hook for the specified VCPU.
656 *
657 * @param pVCpu The cross context virtual CPU structure.
658 * @remarks Can be called from any thread.
659 */
660VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
661{
662 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
663 AssertRC(rc);
664}
665
666
667/**
668 * Disables the thread switching hook for this VCPU (if we got one).
669 *
670 * @param pVCpu The cross context virtual CPU structure.
671 * @thread EMT(pVCpu)
672 *
673 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
674 * this call. This means you have to be careful with what you do!
675 */
676VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
677{
678 /*
679 * Clear the VCPU <-> host CPU mapping as we've left HM context.
680 * @bugref{7726#c19} explains the need for this trick:
681 *
682 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
683 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
684 * longjmp & normal return to ring-3, which opens a window where we may be
685 * rescheduled without changing VMCPUID::idHostCpu and cause confusion if
686 * the CPU starts executing a different EMT. Both functions first disable
687 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
688 * an opening for getting preempted.
689 */
690 /** @todo Make HM not need this API! Then we could leave the hooks enabled
691 * all the time. */
692 /** @todo move this into the context hook disabling if(). */
693 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
694
695 /*
696 * Disable the context hook, if we got one.
697 */
698 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
699 {
700 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
701 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
702 AssertRC(rc);
703 }
704}
705
706
707/**
708 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
709 *
710 * @returns true if registered, false otherwise.
711 * @param pVCpu The cross context virtual CPU structure.
712 */
713DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
714{
715 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
716}
717
718
719/**
720 * Whether thread-context hooks are registered for this VCPU.
721 *
722 * @returns true if registered, false otherwise.
723 * @param pVCpu The cross context virtual CPU structure.
724 */
725VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
726{
727 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
728}
729
730
731#ifdef VBOX_WITH_STATISTICS
732/**
733 * Record return code statistics
734 * @param pVM The cross context VM structure.
735 * @param pVCpu The cross context virtual CPU structure.
736 * @param rc The status code.
737 */
738static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
739{
740 /*
741 * Collect statistics.
742 */
743 switch (rc)
744 {
745 case VINF_SUCCESS:
746 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
747 break;
748 case VINF_EM_RAW_INTERRUPT:
749 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
750 break;
751 case VINF_EM_RAW_INTERRUPT_HYPER:
752 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
753 break;
754 case VINF_EM_RAW_GUEST_TRAP:
755 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
756 break;
757 case VINF_EM_RAW_RING_SWITCH:
758 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
759 break;
760 case VINF_EM_RAW_RING_SWITCH_INT:
761 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
762 break;
763 case VINF_EM_RAW_STALE_SELECTOR:
764 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
765 break;
766 case VINF_EM_RAW_IRET_TRAP:
767 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
768 break;
769 case VINF_IOM_R3_IOPORT_READ:
770 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
771 break;
772 case VINF_IOM_R3_IOPORT_WRITE:
773 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
774 break;
775 case VINF_IOM_R3_MMIO_READ:
776 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
777 break;
778 case VINF_IOM_R3_MMIO_WRITE:
779 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
780 break;
781 case VINF_IOM_R3_MMIO_READ_WRITE:
782 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
783 break;
784 case VINF_PATM_HC_MMIO_PATCH_READ:
785 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
786 break;
787 case VINF_PATM_HC_MMIO_PATCH_WRITE:
788 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
789 break;
790 case VINF_CPUM_R3_MSR_READ:
791 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
792 break;
793 case VINF_CPUM_R3_MSR_WRITE:
794 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
795 break;
796 case VINF_EM_RAW_EMULATE_INSTR:
797 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
798 break;
799 case VINF_EM_RAW_EMULATE_IO_BLOCK:
800 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
801 break;
802 case VINF_PATCH_EMULATE_INSTR:
803 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
804 break;
805 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
806 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
807 break;
808 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
809 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
810 break;
811 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
812 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
813 break;
814 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
815 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
816 break;
817 case VINF_CSAM_PENDING_ACTION:
818 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
819 break;
820 case VINF_PGM_SYNC_CR3:
821 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
822 break;
823 case VINF_PATM_PATCH_INT3:
824 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
825 break;
826 case VINF_PATM_PATCH_TRAP_PF:
827 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
828 break;
829 case VINF_PATM_PATCH_TRAP_GP:
830 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
831 break;
832 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
833 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
834 break;
835 case VINF_EM_RESCHEDULE_REM:
836 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
837 break;
838 case VINF_EM_RAW_TO_R3:
839 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
840 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
841 else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
842 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
843 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
844 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
845 else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
846 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
847 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
848 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
849 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
850 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
851 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
852 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
853 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
854 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
855 else
856 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
857 break;
858
859 case VINF_EM_RAW_TIMER_PENDING:
860 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
861 break;
862 case VINF_EM_RAW_INTERRUPT_PENDING:
863 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
864 break;
865 case VINF_VMM_CALL_HOST:
866 switch (pVCpu->vmm.s.enmCallRing3Operation)
867 {
868 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
869 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
870 break;
871 case VMMCALLRING3_PDM_LOCK:
872 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
873 break;
874 case VMMCALLRING3_PGM_POOL_GROW:
875 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
876 break;
877 case VMMCALLRING3_PGM_LOCK:
878 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
879 break;
880 case VMMCALLRING3_PGM_MAP_CHUNK:
881 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
882 break;
883 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
884 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
885 break;
886 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
887 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
888 break;
889 case VMMCALLRING3_VMM_LOGGER_FLUSH:
890 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
891 break;
892 case VMMCALLRING3_VM_SET_ERROR:
893 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
894 break;
895 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
896 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
897 break;
898 case VMMCALLRING3_VM_R0_ASSERTION:
899 default:
900 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
901 break;
902 }
903 break;
904 case VINF_PATM_DUPLICATE_FUNCTION:
905 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
906 break;
907 case VINF_PGM_CHANGE_MODE:
908 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
909 break;
910 case VINF_PGM_POOL_FLUSH_PENDING:
911 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
912 break;
913 case VINF_EM_PENDING_REQUEST:
914 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
915 break;
916 case VINF_EM_HM_PATCH_TPR_INSTR:
917 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
918 break;
919 default:
920 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
921 break;
922 }
923}
924#endif /* VBOX_WITH_STATISTICS */
925
926
927/**
928 * The Ring 0 entry point, called by the fast-ioctl path.
929 *
930 * @param pVM The cross context VM structure.
931 * The return code is stored in pVM->vmm.s.iLastGZRc.
932 * @param idCpu The Virtual CPU ID of the calling EMT.
933 * @param enmOperation Which operation to execute.
934 * @remarks Assume called with interrupts _enabled_.
935 */
936VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
937{
938 /*
939 * Validation.
940 */
941 if (RT_UNLIKELY(idCpu >= pVM->cCpus))
942 return;
943 PVMCPU pVCpu = &pVM->aCpus[idCpu];
944 if (RT_UNLIKELY(pVCpu->hNativeThreadR0 != RTThreadNativeSelf()))
945 return;
946 VMM_CHECK_SMAP_SETUP();
947 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
948
949 /*
950 * Perform requested operation.
951 */
952 switch (enmOperation)
953 {
954 /*
955 * Switch to GC and run guest raw mode code.
956 * Disable interrupts before doing the world switch.
957 */
958 case VMMR0_DO_RAW_RUN:
959 {
960#ifdef VBOX_WITH_RAW_MODE
961# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
962 /* Some safety precautions first. */
963 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
964 {
965 pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
966 break;
967 }
968# endif
969
970 /*
971 * Disable preemption.
972 */
973 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
974 RTThreadPreemptDisable(&PreemptState);
975
976 /*
977 * Get the host CPU identifiers, make sure they are valid and that
978 * we've got a TSC delta for the CPU.
979 */
980 RTCPUID idHostCpu;
981 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
982 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
983 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
984 {
985 /*
986 * Commit the CPU identifiers and update the periodic preemption timer if it's active.
987 */
988# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
989 CPUMR0SetLApic(pVCpu, iHostCpuSet);
990# endif
991 pVCpu->iHostCpuSet = iHostCpuSet;
992 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
993
994 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
995 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
996
997 /*
998 * We might need to disable VT-x if the active switcher turns off paging.
999 */
1000 bool fVTxDisabled;
1001 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1002 if (RT_SUCCESS(rc))
1003 {
1004 /*
1005 * Disable interrupts and run raw-mode code. The loop is for efficiently
1006 * dispatching tracepoints that fired in raw-mode context.
1007 */
1008 RTCCUINTREG uFlags = ASMIntDisableFlags();
1009
1010 for (;;)
1011 {
1012 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
1013 TMNotifyStartOfExecution(pVCpu);
1014
1015 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1016 pVCpu->vmm.s.iLastGZRc = rc;
1017
1018 TMNotifyEndOfExecution(pVCpu);
1019 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1020
1021 if (rc != VINF_VMM_CALL_TRACER)
1022 break;
1023 SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
1024 }
1025
1026 /*
1027 * Re-enable VT-x before we dispatch any pending host interrupts and
1028 * re-enable interrupts.
1029 */
1030 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1031
1032 if ( rc == VINF_EM_RAW_INTERRUPT
1033 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1034 TRPMR0DispatchHostInterrupt(pVM);
1035
1036 ASMSetFlags(uFlags);
1037
1038 /* Fire dtrace probe and collect statistics. */
1039 VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1040# ifdef VBOX_WITH_STATISTICS
1041 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1042 vmmR0RecordRC(pVM, pVCpu, rc);
1043# endif
1044 }
1045 else
1046 pVCpu->vmm.s.iLastGZRc = rc;
1047
1048 /*
1049 * Invalidate the host CPU identifiers as we restore preemption.
1050 */
1051 pVCpu->iHostCpuSet = UINT32_MAX;
1052 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1053
1054 RTThreadPreemptRestore(&PreemptState);
1055 }
1056 /*
1057 * Invalid CPU set index or TSC delta in need of measuring.
1058 */
1059 else
1060 {
1061 RTThreadPreemptRestore(&PreemptState);
1062 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1063 {
1064 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1065 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1066 0 /*default cTries*/);
1067 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1068 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1069 else
1070 pVCpu->vmm.s.iLastGZRc = rc;
1071 }
1072 else
1073 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1074 }
1075
1076#else /* !VBOX_WITH_RAW_MODE */
1077 pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_NOT_SUPPORTED;
1078#endif
1079 break;
1080 }
1081
1082 /*
1083 * Run guest code using the available hardware acceleration technology.
1084 */
1085 case VMMR0_DO_HM_RUN:
1086 {
1087 /*
1088 * Disable preemption.
1089 */
1090 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1091 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1092 RTThreadPreemptDisable(&PreemptState);
1093
1094 /*
1095 * Get the host CPU identifiers, make sure they are valid and that
1096 * we've got a TSC delta for the CPU.
1097 */
1098 RTCPUID idHostCpu;
1099 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1100 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1101 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1102 {
1103 pVCpu->iHostCpuSet = iHostCpuSet;
1104 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1105
1106 /*
1107 * Update the periodic preemption timer if it's active.
1108 */
1109 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1110 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1111 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1112
1113#ifdef LOG_ENABLED
1114 /*
1115 * Ugly: Lazy registration of ring 0 loggers.
1116 */
1117 if (pVCpu->idCpu > 0)
1118 {
1119 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
1120 if ( pR0Logger
1121 && RT_UNLIKELY(!pR0Logger->fRegistered))
1122 {
1123 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
1124 pR0Logger->fRegistered = true;
1125 }
1126 }
1127#endif
1128
1129 int rc;
1130 bool fPreemptRestored = false;
1131 if (!HMR0SuspendPending())
1132 {
1133 /*
1134 * Enable the context switching hook.
1135 */
1136 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1137 {
1138 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1139 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1140 }
1141
1142 /*
1143 * Enter HM context.
1144 */
1145 rc = HMR0Enter(pVM, pVCpu);
1146 if (RT_SUCCESS(rc))
1147 {
1148 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1149
1150 /*
1151 * When preemption hooks are in place, enable preemption now that
1152 * we're in HM context.
1153 */
1154 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1155 {
1156 fPreemptRestored = true;
1157 RTThreadPreemptRestore(&PreemptState);
1158 }
1159
1160 /*
1161 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1162 */
1163 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1164 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1165 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1166
1167 /*
1168 * Assert sanity on the way out. Using manual assertion code here as normal
1169 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1170 */
1171 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1172 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1173 {
1174 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1175 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1176 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1177 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1178 }
1179 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1180 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1181 {
1182 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1183 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1184 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1185 rc = VERR_INVALID_STATE;
1186 }
1187
1188 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1189 }
1190 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1191
1192 /*
1193 * Invalidate the host CPU identifiers before we disable the context
1194 * hook / restore preemption.
1195 */
1196 pVCpu->iHostCpuSet = UINT32_MAX;
1197 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1198
1199 /*
1200 * Disable context hooks. Due to unresolved cleanup issues, we
1201 * cannot leave the hooks enabled when we return to ring-3.
1202 *
1203 * Note! At the moment HM may also have disabled the hook
1204 * when we get here, but the IPRT API handles that.
1205 */
1206 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1207 {
1208 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1209 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1210 }
1211 }
1212 /*
1213 * The system is about to go into suspend mode; go back to ring 3.
1214 */
1215 else
1216 {
1217 rc = VINF_EM_RAW_INTERRUPT;
1218 pVCpu->iHostCpuSet = UINT32_MAX;
1219 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1220 }
1221
1222 /** @todo When HM stops messing with the context hook state, we'll disable
1223 * preemption again before the RTThreadCtxHookDisable call. */
1224 if (!fPreemptRestored)
1225 RTThreadPreemptRestore(&PreemptState);
1226
1227 pVCpu->vmm.s.iLastGZRc = rc;
1228
1229 /* Fire dtrace probe and collect statistics. */
1230 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1231#ifdef VBOX_WITH_STATISTICS
1232 vmmR0RecordRC(pVM, pVCpu, rc);
1233#endif
1234 }
1235 /*
1236 * Invalid CPU set index or TSC delta in need of measuring.
1237 */
1238 else
1239 {
1240 pVCpu->iHostCpuSet = UINT32_MAX;
1241 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1242 RTThreadPreemptRestore(&PreemptState);
1243 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1244 {
1245 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1246 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1247 0 /*default cTries*/);
1248 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1249 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1250 else
1251 pVCpu->vmm.s.iLastGZRc = rc;
1252 }
1253 else
1254 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1255 }
1256 break;
1257 }
1258
1259 /*
1260 * For profiling.
1261 */
1262 case VMMR0_DO_NOP:
1263 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1264 break;
1265
1266 /*
1267 * Impossible.
1268 */
1269 default:
1270 AssertMsgFailed(("%#x\n", enmOperation));
1271 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1272 break;
1273 }
1274 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1275}
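/* Illustrative sketch (not part of the build): the fallback both branches of
   VMMR0EntryFast use when the current CPU has no TSC delta yet - kick off a
   measurement and send the EMT back to ring-3 so it can retry the call.
   @code
   if (iHostCpuSet < RTCPUSET_MAX_CPUS)
   {
       // Kick off a TSC delta measurement for this CPU (flags=0, 2 ms retry wait,
       // 5 s thread wait, default number of tries) and let ring-3 retry.
       int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0, 2, 5*RT_MS_1SEC, 0);
       pVCpu->vmm.s.iLastGZRc = RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE ? VINF_EM_RAW_TO_R3 : rc;
   }
   else
       pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;  // bogus CPU set index
   @endcode */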
1276
1277
1278/**
1279 * Validates a session or VM session argument.
1280 *
1281 * @returns true / false accordingly.
1282 * @param pVM The cross context VM structure.
1283 * @param pClaimedSession The session claim to validate.
1284 * @param pSession The session argument.
1285 */
1286DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1287{
1288 /* This must be set! */
1289 if (!pSession)
1290 return false;
1291
1292 /* Only one out of the two. */
1293 if (pVM && pClaimedSession)
1294 return false;
1295 if (pVM)
1296 pClaimedSession = pVM->pSession;
1297 return pClaimedSession == pSession;
1298}
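/* Illustrative sketch (not part of the build): what vmmR0IsValidSession accepts -
   either the VM's own session or an explicitly claimed session, never both, and
   never a NULL caller session. pSession and pOtherSession are hypothetical
   stand-ins for two distinct support driver sessions.
   @code
   // pVM != NULL: the caller's session must equal pVM->pSession.
   Assert( vmmR0IsValidSession(pVM, NULL, pVM->pSession));
   Assert(!vmmR0IsValidSession(pVM, pOtherSession, pVM->pSession)); // both pVM and a claim: rejected
   // pVM == NULL: the caller's session must equal the claimed session.
   Assert( vmmR0IsValidSession(NULL, pSession, pSession));
   Assert(!vmmR0IsValidSession(NULL, pSession, NULL));              // NULL caller session: rejected
   @endcode */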
1299
1300
1301/**
1302 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1303 * called through a longjmp so we can exit safely on failure.
1304 *
1305 * @returns VBox status code.
1306 * @param pVM The cross context VM structure.
1307 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1308 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1309 * @param enmOperation Which operation to execute.
1310 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1311 * The support driver validates this if it's present.
1312 * @param u64Arg Some simple constant argument.
1313 * @param pSession The session of the caller.
1314 * @remarks Assume called with interrupts _enabled_.
1315 */
1316static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1317{
1318 /*
1319 * Common VM pointer validation.
1320 */
1321 if (pVM)
1322 {
1323 if (RT_UNLIKELY( !VALID_PTR(pVM)
1324 || ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
1325 {
1326 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
1327 return VERR_INVALID_POINTER;
1328 }
1329 if (RT_UNLIKELY( pVM->enmVMState < VMSTATE_CREATING
1330 || pVM->enmVMState > VMSTATE_TERMINATED
1331 || pVM->pVMR0 != pVM))
1332 {
1333 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
1334 pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
1335 return VERR_INVALID_POINTER;
1336 }
1337
1338 if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
1339 {
1340 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
1341 return VERR_INVALID_PARAMETER;
1342 }
1343 }
1344 else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
1345 {
1346 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1347 return VERR_INVALID_PARAMETER;
1348 }
1349 VMM_CHECK_SMAP_SETUP();
1350 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1351 int rc;
1352
1353 switch (enmOperation)
1354 {
1355 /*
1356 * GVM requests
1357 */
1358 case VMMR0_DO_GVMM_CREATE_VM:
1359 if (pVM || u64Arg || idCpu != NIL_VMCPUID)
1360 return VERR_INVALID_PARAMETER;
1361 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);
1362 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1363 break;
1364
1365 case VMMR0_DO_GVMM_DESTROY_VM:
1366 if (pReqHdr || u64Arg)
1367 return VERR_INVALID_PARAMETER;
1368 rc = GVMMR0DestroyVM(pVM);
1369 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1370 break;
1371
1372 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1373 {
1374 if (!pVM)
1375 return VERR_INVALID_PARAMETER;
1376 rc = GVMMR0RegisterVCpu(pVM, idCpu);
1377 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1378 break;
1379 }
1380
1381 case VMMR0_DO_GVMM_SCHED_HALT:
1382 if (pReqHdr)
1383 return VERR_INVALID_PARAMETER;
1384 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1385 rc = GVMMR0SchedHalt(pVM, idCpu, u64Arg);
1386 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1387 break;
1388
1389 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1390 if (pReqHdr || u64Arg)
1391 return VERR_INVALID_PARAMETER;
1392 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1393 rc = GVMMR0SchedWakeUp(pVM, idCpu);
1394 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1395 break;
1396
1397 case VMMR0_DO_GVMM_SCHED_POKE:
1398 if (pReqHdr || u64Arg)
1399 return VERR_INVALID_PARAMETER;
1400 rc = GVMMR0SchedPoke(pVM, idCpu);
1401 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1402 break;
1403
1404 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1405 if (u64Arg)
1406 return VERR_INVALID_PARAMETER;
1407 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1408 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1409 break;
1410
1411 case VMMR0_DO_GVMM_SCHED_POLL:
1412 if (pReqHdr || u64Arg > 1)
1413 return VERR_INVALID_PARAMETER;
1414 rc = GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);
1415 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1416 break;
1417
1418 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1419 if (u64Arg)
1420 return VERR_INVALID_PARAMETER;
1421 rc = GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);
1422 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1423 break;
1424
1425 case VMMR0_DO_GVMM_RESET_STATISTICS:
1426 if (u64Arg)
1427 return VERR_INVALID_PARAMETER;
1428 rc = GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);
1429 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1430 break;
1431
1432 /*
1433 * Initialize the R0 part of a VM instance.
1434 */
1435 case VMMR0_DO_VMMR0_INIT:
1436 rc = vmmR0InitVM(pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1437 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1438 break;
1439
1440 /*
1441 * Terminate the R0 part of a VM instance.
1442 */
1443 case VMMR0_DO_VMMR0_TERM:
1444 rc = VMMR0TermVM(pVM, NULL);
1445 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1446 break;
1447
1448 /*
1449 * Attempt to enable HM mode and check the current setting.
1450 */
1451 case VMMR0_DO_HM_ENABLE:
1452 rc = HMR0EnableAllCpus(pVM);
1453 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1454 break;
1455
1456 /*
1457 * Setup the hardware accelerated session.
1458 */
1459 case VMMR0_DO_HM_SETUP_VM:
1460 rc = HMR0SetupVM(pVM);
1461 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1462 break;
1463
1464 /*
1465 * Switch to RC to execute Hypervisor function.
1466 */
1467 case VMMR0_DO_CALL_HYPERVISOR:
1468 {
1469#ifdef VBOX_WITH_RAW_MODE
1470 /*
1471 * Validate input / context.
1472 */
1473 if (RT_UNLIKELY(idCpu != 0))
1474 return VERR_INVALID_CPU_ID;
1475 if (RT_UNLIKELY(pVM->cCpus != 1))
1476 return VERR_INVALID_PARAMETER;
1477 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1478# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1479 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1480 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1481# endif
1482
1483 /*
1484 * Disable interrupts.
1485 */
1486 RTCCUINTREG fFlags = ASMIntDisableFlags();
1487
1488 /*
1489 * Get the host CPU identifiers, make sure they are valid and that
1490 * we've got a TSC delta for the CPU.
1491 */
1492 RTCPUID idHostCpu;
1493 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1494 if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
1495 {
1496 ASMSetFlags(fFlags);
1497 return VERR_INVALID_CPU_INDEX;
1498 }
1499 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1500 {
1501 ASMSetFlags(fFlags);
1502 rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1503 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1504 0 /*default cTries*/);
1505 if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
1506 {
1507 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1508 return rc;
1509 }
1510 }
1511
1512 /*
1513 * Commit the CPU identifiers.
1514 */
1515# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1516 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1517# endif
1518 pVCpu->iHostCpuSet = iHostCpuSet;
1519 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1520
1521 /*
1522 * We might need to disable VT-x if the active switcher turns off paging.
1523 */
1524 bool fVTxDisabled;
1525 rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1526 if (RT_SUCCESS(rc))
1527 {
1528 /*
1529 * Go through the wormhole...
1530 */
1531 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1532
1533 /*
1534 * Re-enable VT-x before we dispatch any pending host interrupts.
1535 */
1536 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1537
1538 if ( rc == VINF_EM_RAW_INTERRUPT
1539 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1540 TRPMR0DispatchHostInterrupt(pVM);
1541 }
1542
1543 /*
1544 * Invalidate the host CPU identifiers as we restore interrupts.
1545 */
1546 pVCpu->iHostCpuSet = UINT32_MAX;
1547 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1548 ASMSetFlags(fFlags);
1549
1550#else /* !VBOX_WITH_RAW_MODE */
1551 rc = VERR_RAW_MODE_NOT_SUPPORTED;
1552#endif
1553 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1554 break;
1555 }
1556
1557 /*
1558 * PGM wrappers.
1559 */
1560 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1561 if (idCpu == NIL_VMCPUID)
1562 return VERR_INVALID_CPU_ID;
1563 rc = PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);
1564 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1565 break;
1566
1567 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1568 if (idCpu == NIL_VMCPUID)
1569 return VERR_INVALID_CPU_ID;
1570 rc = PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu]);
1571 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1572 break;
1573
1574 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1575 if (idCpu == NIL_VMCPUID)
1576 return VERR_INVALID_CPU_ID;
1577 rc = PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);
1578 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1579 break;
1580
1581 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1582 if (idCpu != 0)
1583 return VERR_INVALID_CPU_ID;
1584 rc = PGMR0PhysSetupIommu(pVM);
1585 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1586 break;
1587
1588 /*
1589 * GMM wrappers.
1590 */
1591 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1592 if (u64Arg)
1593 return VERR_INVALID_PARAMETER;
1594 rc = GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1595 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1596 break;
1597
1598 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1599 if (u64Arg)
1600 return VERR_INVALID_PARAMETER;
1601 rc = GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1602 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1603 break;
1604
1605 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1606 if (u64Arg)
1607 return VERR_INVALID_PARAMETER;
1608 rc = GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1609 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1610 break;
1611
1612 case VMMR0_DO_GMM_FREE_PAGES:
1613 if (u64Arg)
1614 return VERR_INVALID_PARAMETER;
1615 rc = GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1616 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1617 break;
1618
1619 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1620 if (u64Arg)
1621 return VERR_INVALID_PARAMETER;
1622 rc = GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1623 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1624 break;
1625
1626 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1627 if (u64Arg)
1628 return VERR_INVALID_PARAMETER;
1629 rc = GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);
1630 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1631 break;
1632
1633 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1634 if (idCpu == NIL_VMCPUID)
1635 return VERR_INVALID_CPU_ID;
1636 if (u64Arg)
1637 return VERR_INVALID_PARAMETER;
1638 rc = GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1639 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1640 break;
1641
1642 case VMMR0_DO_GMM_BALLOONED_PAGES:
1643 if (u64Arg)
1644 return VERR_INVALID_PARAMETER;
1645 rc = GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1646 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1647 break;
1648
1649 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1650 if (u64Arg)
1651 return VERR_INVALID_PARAMETER;
1652 rc = GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1653 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1654 break;
1655
1656 case VMMR0_DO_GMM_SEED_CHUNK:
1657 if (pReqHdr)
1658 return VERR_INVALID_PARAMETER;
1659 rc = GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);
1660 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1661 break;
1662
1663 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1664 if (idCpu == NIL_VMCPUID)
1665 return VERR_INVALID_CPU_ID;
1666 if (u64Arg)
1667 return VERR_INVALID_PARAMETER;
1668 rc = GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1669 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1670 break;
1671
1672 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1673 if (idCpu == NIL_VMCPUID)
1674 return VERR_INVALID_CPU_ID;
1675 if (u64Arg)
1676 return VERR_INVALID_PARAMETER;
1677 rc = GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1678 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1679 break;
1680
1681 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1682 if (idCpu == NIL_VMCPUID)
1683 return VERR_INVALID_CPU_ID;
1684 if ( u64Arg
1685 || pReqHdr)
1686 return VERR_INVALID_PARAMETER;
1687 rc = GMMR0ResetSharedModules(pVM, idCpu);
1688 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1689 break;
1690
1691#ifdef VBOX_WITH_PAGE_SHARING
1692 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1693 {
1694 if (idCpu == NIL_VMCPUID)
1695 return VERR_INVALID_CPU_ID;
1696 if ( u64Arg
1697 || pReqHdr)
1698 return VERR_INVALID_PARAMETER;
1699
1700 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1701 Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());
1702
1703# ifdef DEBUG_sandervl
1704 /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
1705 /** @todo This can have bad side effects for unexpected jumps back to ring-3. */
1706 rc = GMMR0CheckSharedModulesStart(pVM);
1707 if (rc == VINF_SUCCESS)
1708 {
1709 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
1710 Assert( rc == VINF_SUCCESS
1711 || (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
1712 GMMR0CheckSharedModulesEnd(pVM);
1713 }
1714# else
1715 rc = GMMR0CheckSharedModules(pVM, pVCpu);
1716# endif
1717 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1718 break;
1719 }
1720#endif
1721
1722#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1723 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1724 if (u64Arg)
1725 return VERR_INVALID_PARAMETER;
1726 rc = GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1727 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1728 break;
1729#endif
1730
1731 case VMMR0_DO_GMM_QUERY_STATISTICS:
1732 if (u64Arg)
1733 return VERR_INVALID_PARAMETER;
1734 rc = GMMR0QueryStatisticsReq(pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1735 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1736 break;
1737
1738 case VMMR0_DO_GMM_RESET_STATISTICS:
1739 if (u64Arg)
1740 return VERR_INVALID_PARAMETER;
1741 rc = GMMR0ResetStatisticsReq(pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1742 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1743 break;
1744
1745 /*
1746 * A quick GCFGM mock-up.
1747 */
1748 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1749 case VMMR0_DO_GCFGM_SET_VALUE:
1750 case VMMR0_DO_GCFGM_QUERY_VALUE:
1751 {
1752 if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1753 return VERR_INVALID_PARAMETER;
1754 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1755 if (pReq->Hdr.cbReq != sizeof(*pReq))
1756 return VERR_INVALID_PARAMETER;
1757 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1758 {
1759 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1760 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1761 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1762 }
1763 else
1764 {
1765 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1766 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1767 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1768 }
1769 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1770 break;
1771 }
1772
1773 /*
1774 * PDM Wrappers.
1775 */
1776 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1777 {
1778 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1779 return VERR_INVALID_PARAMETER;
1780 rc = PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1781 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1782 break;
1783 }
1784
1785 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1786 {
1787 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1788 return VERR_INVALID_PARAMETER;
1789 rc = PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1790 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1791 break;
1792 }
1793
1794 /*
1795 * Requests to the internal networking service.
1796 */
1797 case VMMR0_DO_INTNET_OPEN:
1798 {
1799 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
1800 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
1801 return VERR_INVALID_PARAMETER;
1802 rc = IntNetR0OpenReq(pSession, pReq);
1803 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1804 break;
1805 }
1806
1807 case VMMR0_DO_INTNET_IF_CLOSE:
1808 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1809 return VERR_INVALID_PARAMETER;
1810 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
1811 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1812 break;
1813
1814
1815 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
1816 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1817 return VERR_INVALID_PARAMETER;
1818 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
1819 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1820 break;
1821
1822 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
1823 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1824 return VERR_INVALID_PARAMETER;
1825 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
1826 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1827 break;
1828
1829 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
1830 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1831 return VERR_INVALID_PARAMETER;
1832 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
1833 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1834 break;
1835
1836 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
1837 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1838 return VERR_INVALID_PARAMETER;
1839 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
1840 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1841 break;
1842
1843 case VMMR0_DO_INTNET_IF_SEND:
1844 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1845 return VERR_INVALID_PARAMETER;
1846 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
1847 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1848 break;
1849
1850 case VMMR0_DO_INTNET_IF_WAIT:
1851 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1852 return VERR_INVALID_PARAMETER;
1853 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
1854 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1855 break;
1856
1857 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
1858 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1859 return VERR_INVALID_PARAMETER;
1860 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
1861 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1862 break;
1863
1864#ifdef VBOX_WITH_PCI_PASSTHROUGH
1865 /*
1866 * Requests to host PCI driver service.
1867 */
1868 case VMMR0_DO_PCIRAW_REQ:
1869 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1870 return VERR_INVALID_PARAMETER;
1871 rc = PciRawR0ProcessReq(pSession, pVM, (PPCIRAWSENDREQ)pReqHdr);
1872 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1873 break;
1874#endif
1875 /*
1876 * For profiling.
1877 */
1878 case VMMR0_DO_NOP:
1879 case VMMR0_DO_SLOW_NOP:
1880 return VINF_SUCCESS;
1881
1882 /*
1883 * For testing Ring-0 APIs invoked in this environment.
1884 */
1885 case VMMR0_DO_TESTS:
1886 /** @todo make new test */
1887 return VINF_SUCCESS;
1888
1889
1890#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
1891 case VMMR0_DO_TEST_SWITCHER3264:
1892 if (idCpu == NIL_VMCPUID)
1893 return VERR_INVALID_CPU_ID;
1894 rc = HMR0TestSwitcher3264(pVM);
1895 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1896 break;
1897#endif
1898 default:
1899 /*
1900             * We're returning VERR_NOT_SUPPORTED here so that we get something
1901             * other than -1, which the interrupt gate glue code might return.
1902 */
1903 Log(("operation %#x is not supported\n", enmOperation));
1904 return VERR_NOT_SUPPORTED;
1905 }
1906 return rc;
1907}
1908
1909
1910/**
1911 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
1912 */
1913typedef struct VMMR0ENTRYEXARGS
1914{
1915    PVM                 pVM;            /**< The cross context VM structure. */
1916    VMCPUID             idCpu;          /**< The virtual CPU id (or NIL_VMCPUID). */
1917    VMMR0OPERATION      enmOperation;   /**< The operation to execute. */
1918    PSUPVMMR0REQHDR     pReq;           /**< Pointer to the request packet. Optional. */
1919    uint64_t            u64Arg;         /**< Simple constant argument. */
1920    PSUPDRVSESSION      pSession;       /**< The session of the caller. */
1921} VMMR0ENTRYEXARGS;
1922/** Pointer to a vmmR0EntryExWrapper argument package. */
1923typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
1924
1925/**
1926 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
1927 *
1928 * @returns VBox status code.
1929 * @param pvArgs The argument package
1930 */
1931static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
1932{
1933 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
1934 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
1935 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
1936 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
1937 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
1938 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
1939}
1940
1941
1942/**
1943 * The Ring 0 entry point, called by the support library (SUP).
1944 *
1945 * @returns VBox status code.
1946 * @param pVM The cross context VM structure.
1947 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1948 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1949 * @param enmOperation Which operation to execute.
1950 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
1951 * @param u64Arg Some simple constant argument.
1952 * @param pSession The session of the caller.
1953 * @remarks Assume called with interrupts _enabled_.
1954 */
1955VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
1956{
1957 /*
1958 * Requests that should only happen on the EMT thread will be
1959 * wrapped in a setjmp so we can assert without causing trouble.
1960 */
1961 if ( VALID_PTR(pVM)
1962 && pVM->pVMR0
1963 && idCpu < pVM->cCpus)
1964 {
1965 switch (enmOperation)
1966 {
1967 /* These might/will be called before VMMR3Init. */
1968 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1969 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1970 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1971 case VMMR0_DO_GMM_FREE_PAGES:
1972 case VMMR0_DO_GMM_BALLOONED_PAGES:
1973 /* On the mac we might not have a valid jmp buf, so check these as well. */
1974 case VMMR0_DO_VMMR0_INIT:
1975 case VMMR0_DO_VMMR0_TERM:
1976 {
1977 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1978
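                /* If the jump buffer hasn't been set up yet (these requests can arrive
                   before VMMR3Init), skip the setjmp and use the direct worker call below. */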
1979 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
1980 break;
1981
1982 /** @todo validate this EMT claim... GVM knows. */
1983 VMMR0ENTRYEXARGS Args;
1984 Args.pVM = pVM;
1985 Args.idCpu = idCpu;
1986 Args.enmOperation = enmOperation;
1987 Args.pReq = pReq;
1988 Args.u64Arg = u64Arg;
1989 Args.pSession = pSession;
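                /* Enter the worker through the setjmp wrapper so assertions and ring-3
                   calls can longjmp safely back out of ring-0. */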
1990 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
1991 }
1992
1993 default:
1994 break;
1995 }
1996 }
1997 return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
1998}
1999
2000
2001/**
2002 * Checks whether we've armed the ring-0 long jump machinery.
2003 *
2004 * @returns @c true / @c false
2005 * @param pVCpu The cross context virtual CPU structure.
2006 * @thread EMT
2007 * @sa VMMIsLongJumpArmed
2008 */
2009VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
2010{
2011#ifdef RT_ARCH_X86
2012 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2013 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2014#else
2015 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2016 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2017#endif
2018}
2019
2020
2021/**
2022 * Checks whether we've done a ring-3 long jump.
2023 *
2024 * @returns @c true / @c false
2025 * @param pVCpu The cross context virtual CPU structure.
2026 * @thread EMT
2027 */
2028VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
2029{
2030 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2031}
2032
2033
2034/**
2035 * Internal R0 logger worker: Flush logger.
2036 *
2037 * @param pLogger The logger instance to flush.
2038 * @remark This function must be exported!
2039 */
2040VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2041{
2042#ifdef LOG_ENABLED
2043 /*
2044 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2045     * (This code is a bit paranoid.)
2046 */
2047 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2048 if ( !VALID_PTR(pR0Logger)
2049 || !VALID_PTR(pR0Logger + 1)
2050 || pLogger->u32Magic != RTLOGGER_MAGIC)
2051 {
2052# ifdef DEBUG
2053 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2054# endif
2055 return;
2056 }
2057 if (pR0Logger->fFlushingDisabled)
2058 return; /* quietly */
2059
2060 PVM pVM = pR0Logger->pVM;
2061 if ( !VALID_PTR(pVM)
2062 || pVM->pVMR0 != pVM)
2063 {
2064# ifdef DEBUG
2065 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
2066# endif
2067 return;
2068 }
2069
2070 PVMCPU pVCpu = VMMGetCpu(pVM);
2071 if (pVCpu)
2072 {
2073 /*
2074 * Check that the jump buffer is armed.
2075 */
2076# ifdef RT_ARCH_X86
2077 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2078 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2079# else
2080 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2081 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2082# endif
2083 {
2084# ifdef DEBUG
2085 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2086# endif
2087 return;
2088 }
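        /* The jump buffer is armed; ask ring-3 to do the actual log flushing. */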
2089 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2090 }
2091# ifdef DEBUG
2092 else
2093 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2094# endif
2095#else
2096 NOREF(pLogger);
2097#endif /* LOG_ENABLED */
2098}
2099
2100/**
2101 * Internal R0 logger worker: Custom prefix.
2102 *
2103 * @returns Number of chars written.
2104 *
2105 * @param pLogger The logger instance.
2106 * @param pchBuf The output buffer.
2107 * @param cchBuf The size of the buffer.
2108 * @param pvUser User argument (ignored).
2109 */
2110VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
2111{
2112 NOREF(pvUser);
2113#ifdef LOG_ENABLED
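    /* Recover the wrapping VMMR0LOGGER from the embedded RTLOGGER and sanity check it. */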
2114 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2115 if ( !VALID_PTR(pR0Logger)
2116 || !VALID_PTR(pR0Logger + 1)
2117 || pLogger->u32Magic != RTLOGGER_MAGIC
2118 || cchBuf < 2)
2119 return 0;
2120
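    /* The prefix is the logger's virtual CPU id written as two lowercase hex digits. */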
2121 static const char s_szHex[17] = "0123456789abcdef";
2122 VMCPUID const idCpu = pR0Logger->idCpu;
2123 pchBuf[1] = s_szHex[ idCpu & 15];
2124 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
2125
2126 return 2;
2127#else
2128 NOREF(pLogger); NOREF(pchBuf); NOREF(cchBuf);
2129 return 0;
2130#endif
2131}
2132
2133#ifdef LOG_ENABLED
2134
2135/**
2136 * Disables flushing of the ring-0 debug log.
2137 *
2138 * @param pVCpu The cross context virtual CPU structure.
2139 */
2140VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
2141{
2142 if (pVCpu->vmm.s.pR0LoggerR0)
2143 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2144}
2145
2146
2147/**
2148 * Enables flushing of the ring-0 debug log.
2149 *
2150 * @param pVCpu The cross context virtual CPU structure.
2151 */
2152VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
2153{
2154 if (pVCpu->vmm.s.pR0LoggerR0)
2155 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2156}
2157
2158
2159/**
2160 * Checks whether log flushing is disabled.
2161 * @returns true if flushing is disabled or there is no ring-0 logger, false otherwise.
2162 * @param pVCpu The cross context virtual CPU structure.
2163 */
2164VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
2165{
2166 if (pVCpu->vmm.s.pR0LoggerR0)
2167 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2168 return true;
2169}
2170#endif /* LOG_ENABLED */
2171
2172/**
2173 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2174 *
2175 * @returns true if the breakpoint should be hit, false if it should be ignored.
2176 */
2177DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2178{
2179#if 0
2180 return true;
2181#else
2182 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2183 if (pVM)
2184 {
2185 PVMCPU pVCpu = VMMGetCpu(pVM);
2186
2187 if (pVCpu)
2188 {
2189#ifdef RT_ARCH_X86
2190 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2191 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2192#else
2193 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2194 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2195#endif
2196 {
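                /* Hand the assertion over to ring-3; only hit the breakpoint if that call fails. */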
2197 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2198 return RT_FAILURE_NP(rc);
2199 }
2200 }
2201 }
2202#ifdef RT_OS_LINUX
2203 return true;
2204#else
2205 return false;
2206#endif
2207#endif
2208}
2209
2210
2211/**
2212 * Override this so we can push it up to ring-3.
2213 *
2214 * @param pszExpr Expression. Can be NULL.
2215 * @param uLine Location line number.
2216 * @param pszFile Location file name.
2217 * @param pszFunction Location function name.
2218 */
2219DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2220{
2221 /*
2222 * To the log.
2223 */
2224 LogAlways(("\n!!R0-Assertion Failed!!\n"
2225 "Expression: %s\n"
2226 "Location : %s(%d) %s\n",
2227 pszExpr, pszFile, uLine, pszFunction));
2228
2229 /*
2230 * To the global VMM buffer.
2231 */
2232 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2233 if (pVM)
2234 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2235 "\n!!R0-Assertion Failed!!\n"
2236 "Expression: %s\n"
2237 "Location : %s(%d) %s\n",
2238 pszExpr, pszFile, uLine, pszFunction);
2239
2240 /*
2241 * Continue the normal way.
2242 */
2243 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2244}
2245
2246
2247/**
2248 * Callback for RTLogFormatV which writes to the ring-3 log port.
2249 * See PFNLOGOUTPUT() for details.
2250 */
2251static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2252{
2253 for (size_t i = 0; i < cbChars; i++)
2254 {
2255 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2256 }
2257
2258 NOREF(pv);
2259 return cbChars;
2260}
2261
2262
2263/**
2264 * Override this so we can push it up to ring-3.
2265 *
2266 * @param pszFormat The format string.
2267 * @param va Arguments.
2268 */
2269DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2270{
2271 va_list vaCopy;
2272
2273 /*
2274 * Push the message to the loggers.
2275 */
2276 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2277 if (pLog)
2278 {
2279 va_copy(vaCopy, va);
2280 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2281 va_end(vaCopy);
2282 }
2283 pLog = RTLogRelGetDefaultInstance();
2284 if (pLog)
2285 {
2286 va_copy(vaCopy, va);
2287 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2288 va_end(vaCopy);
2289 }
2290
2291 /*
2292 * Push it to the global VMM buffer.
2293 */
2294 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2295 if (pVM)
2296 {
2297 va_copy(vaCopy, va);
2298 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2299 va_end(vaCopy);
2300 }
2301
2302 /*
2303 * Continue the normal way.
2304 */
2305 RTAssertMsg2V(pszFormat, va);
2306}
2307