VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@60307

Last change on this file since 60307 was 60307, checked in by vboxsync, 9 years ago

VMM: APIC rewrite. Initial commit, work in progress.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 81.2 KB
1/* $Id: VMMR0.cpp 60307 2016-04-04 15:23:11Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/stam.h>
30#include <VBox/vmm/tm.h>
31#include "VMMInternal.h"
32#include <VBox/vmm/vm.h>
33#ifdef VBOX_WITH_PCI_PASSTHROUGH
34# include <VBox/vmm/pdmpci.h>
35#endif
36#ifdef VBOX_WITH_NEW_APIC
37# include <VBox/vmm/apic.h>
38#endif
39
40#include <VBox/vmm/gvmm.h>
41#include <VBox/vmm/gmm.h>
42#include <VBox/vmm/gim.h>
43#include <VBox/intnet.h>
44#include <VBox/vmm/hm.h>
45#include <VBox/param.h>
46#include <VBox/err.h>
47#include <VBox/version.h>
48#include <VBox/log.h>
49
50#include <iprt/asm-amd64-x86.h>
51#include <iprt/assert.h>
52#include <iprt/crc.h>
53#include <iprt/mp.h>
54#include <iprt/once.h>
55#include <iprt/stdarg.h>
56#include <iprt/string.h>
57#include <iprt/thread.h>
58#include <iprt/timer.h>
59
60#include "dtrace/VBoxVMM.h"
61
62
63#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
64# pragma intrinsic(_AddressOfReturnAddress)
65#endif
66
67#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
68# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
69#endif
70
71
72
73/*********************************************************************************************************************************
74* Defined Constants And Macros *
75*********************************************************************************************************************************/
76/** @def VMM_CHECK_SMAP_SETUP
77 * SMAP check setup. */
78/** @def VMM_CHECK_SMAP_CHECK
79 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
80 * it will be logged and @a a_BadExpr is executed. */
81/** @def VMM_CHECK_SMAP_CHECK2
82 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
83 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
84 * executed. */
85#if defined(VBOX_STRICT) || 1
86# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
87# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
88 do { \
89 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
90 { \
91 RTCCUINTREG fEflCheck = ASMGetFlags(); \
92 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
93 { /* likely */ } \
94 else \
95 { \
96 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
97 a_BadExpr; \
98 } \
99 } \
100 } while (0)
101# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
102 do { \
103 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
104 { \
105 RTCCUINTREG fEflCheck = ASMGetFlags(); \
106 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
107 { /* likely */ } \
108 else \
109 { \
110 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
111 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
112 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
113 a_BadExpr; \
114 } \
115 } \
116 } while (0)
117#else
118# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
119# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
120# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
121#endif
122
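/*
 * Illustrative usage sketch of the SMAP check macros above (a minimal example,
 * mirroring the pattern used by ModuleInit and VMMR0EntryFast further down in
 * this file; all names referenced here come from this file):
 *
 *     VMM_CHECK_SMAP_SETUP();                                     // declares fKernelFeatures
 *     VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);    // bail if EFLAGS.AC got cleared
 *     ... do ring-0 work ...
 *     VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);  // also records the failure in the VM assertion buffer
 *
 * VMM_CHECK_SMAP_SETUP() must come first, since both check macros reference the
 * fKernelFeatures variable it declares.
 */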
123
124/*********************************************************************************************************************************
125* Internal Functions *
126*********************************************************************************************************************************/
127RT_C_DECLS_BEGIN
128#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
129extern uint64_t __udivdi3(uint64_t, uint64_t);
130extern uint64_t __umoddi3(uint64_t, uint64_t);
131#endif
132RT_C_DECLS_END
133
134
135/*********************************************************************************************************************************
136* Global Variables *
137*********************************************************************************************************************************/
138/** Drag in necessary library bits.
139 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
140PFNRT g_VMMR0Deps[] =
141{
142 (PFNRT)RTCrc32,
143 (PFNRT)RTOnce,
144#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
145 (PFNRT)__udivdi3,
146 (PFNRT)__umoddi3,
147#endif
148 NULL
149};
150
151#ifdef RT_OS_SOLARIS
152/* Dependency information for the native Solaris loader. */
153extern "C" { char _depends_on[] = "vboxdrv"; }
154#endif
155
156
157
158/**
159 * Initialize the module.
160 * This is called when we're first loaded.
161 *
162 * @returns 0 on success.
163 * @returns VBox status on failure.
164 * @param hMod Image handle for use in APIs.
165 */
166DECLEXPORT(int) ModuleInit(void *hMod)
167{
168 VMM_CHECK_SMAP_SETUP();
169 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
170
171#ifdef VBOX_WITH_DTRACE_R0
172 /*
173 * The first thing to do is register the static tracepoints.
174 * (Deregistration is automatic.)
175 */
176 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
177 if (RT_FAILURE(rc2))
178 return rc2;
179#endif
180 LogFlow(("ModuleInit:\n"));
181
182#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
183 /*
184 * Display the CMOS debug code.
185 */
186 ASMOutU8(0x72, 0x03);
187 uint8_t bDebugCode = ASMInU8(0x73);
188 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
189 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
190#endif
191
192 /*
193 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
194 */
195 int rc = vmmInitFormatTypes();
196 if (RT_SUCCESS(rc))
197 {
198 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
199 rc = GVMMR0Init();
200 if (RT_SUCCESS(rc))
201 {
202 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
203 rc = GMMR0Init();
204 if (RT_SUCCESS(rc))
205 {
206 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
207 rc = HMR0Init();
208 if (RT_SUCCESS(rc))
209 {
210 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
211 rc = PGMRegisterStringFormatTypes();
212 if (RT_SUCCESS(rc))
213 {
214 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
215#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
216 rc = PGMR0DynMapInit();
217#endif
218 if (RT_SUCCESS(rc))
219 {
220 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
221 rc = IntNetR0Init();
222 if (RT_SUCCESS(rc))
223 {
224#ifdef VBOX_WITH_PCI_PASSTHROUGH
225 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
226 rc = PciRawR0Init();
227#endif
228 if (RT_SUCCESS(rc))
229 {
230 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
231 rc = CPUMR0ModuleInit();
232 if (RT_SUCCESS(rc))
233 {
234#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
235 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
236 rc = vmmR0TripleFaultHackInit();
237 if (RT_SUCCESS(rc))
238#endif
239 {
240 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
241 if (RT_SUCCESS(rc))
242 {
243 LogFlow(("ModuleInit: returns success.\n"));
244 return VINF_SUCCESS;
245 }
246 }
247
248 /*
249 * Bail out.
250 */
251#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
252 vmmR0TripleFaultHackTerm();
253#endif
254 }
255 else
256 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
257#ifdef VBOX_WITH_PCI_PASSTHROUGH
258 PciRawR0Term();
259#endif
260 }
261 else
262 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
263 IntNetR0Term();
264 }
265 else
266 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
267#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
268 PGMR0DynMapTerm();
269#endif
270 }
271 else
272 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
273 PGMDeregisterStringFormatTypes();
274 }
275 else
276 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
277 HMR0Term();
278 }
279 else
280 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
281 GMMR0Term();
282 }
283 else
284 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
285 GVMMR0Term();
286 }
287 else
288 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
289 vmmTermFormatTypes();
290 }
291 else
292 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
293
294 LogFlow(("ModuleInit: failed %Rrc\n", rc));
295 return rc;
296}
297
298
299/**
300 * Terminate the module.
301 * This is called when we're finally unloaded.
302 *
303 * @param hMod Image handle for use in APIs.
304 */
305DECLEXPORT(void) ModuleTerm(void *hMod)
306{
307 NOREF(hMod);
308 LogFlow(("ModuleTerm:\n"));
309
310 /*
311 * Terminate the CPUM module (Local APIC cleanup).
312 */
313 CPUMR0ModuleTerm();
314
315 /*
316 * Terminate the internal network service.
317 */
318 IntNetR0Term();
319
320 /*
321 * PGM (Darwin), HM and PciRaw global cleanup.
322 */
323#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
324 PGMR0DynMapTerm();
325#endif
326#ifdef VBOX_WITH_PCI_PASSTHROUGH
327 PciRawR0Term();
328#endif
329 PGMDeregisterStringFormatTypes();
330 HMR0Term();
331#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
332 vmmR0TripleFaultHackTerm();
333#endif
334
335 /*
336 * Destroy the GMM and GVMM instances.
337 */
338 GMMR0Term();
339 GVMMR0Term();
340
341 vmmTermFormatTypes();
342
343 LogFlow(("ModuleTerm: returns\n"));
344}
345
346
347/**
348 * Initiates the R0 driver for a particular VM instance.
349 *
350 * @returns VBox status code.
351 *
352 * @param pVM The cross context VM structure.
353 * @param uSvnRev The SVN revision of the ring-3 part.
354 * @param uBuildType Build type indicator.
355 * @thread EMT.
356 */
357static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
358{
359 VMM_CHECK_SMAP_SETUP();
360 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
361
362 /*
363 * Match the SVN revisions and build type.
364 */
365 if (uSvnRev != VMMGetSvnRev())
366 {
367 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
368 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
369 return VERR_VMM_R0_VERSION_MISMATCH;
370 }
371 if (uBuildType != vmmGetBuildType())
372 {
373 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
374 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
375 return VERR_VMM_R0_VERSION_MISMATCH;
376 }
377 if ( !VALID_PTR(pVM)
378 || pVM->pVMR0 != pVM)
379 return VERR_INVALID_PARAMETER;
380
381
382#ifdef LOG_ENABLED
383 /*
384 * Register the EMT R0 logger instance for VCPU 0.
385 */
386 PVMCPU pVCpu = &pVM->aCpus[0];
387
388 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
389 if (pR0Logger)
390 {
391# if 0 /* testing of the logger. */
392 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
393 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
394 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
395 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
396
397 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
398 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
399 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
400 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
401
402 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
403 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
404 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
405 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
406
407 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
408 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
409 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
410 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
411 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
412 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
413
414 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
415 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
416
417 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
418 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
419 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
420# endif
421 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
422 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
423 pR0Logger->fRegistered = true;
424 }
425#endif /* LOG_ENABLED */
426
427 /*
428 * Check if the host supports high resolution timers or not.
429 */
430 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
431 && !RTTimerCanDoHighResolution())
432 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
433
434 /*
435 * Initialize the per VM data for GVMM and GMM.
436 */
437 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
438 int rc = GVMMR0InitVM(pVM);
439// if (RT_SUCCESS(rc))
440// rc = GMMR0InitPerVMData(pVM);
441 if (RT_SUCCESS(rc))
442 {
443 /*
444 * Init HM, CPUM and PGM (Darwin only).
445 */
446 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
447 rc = HMR0InitVM(pVM);
448 if (RT_SUCCESS(rc))
449 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
450 if (RT_SUCCESS(rc))
451 {
452 rc = CPUMR0InitVM(pVM);
453 if (RT_SUCCESS(rc))
454 {
455 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
456#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
457 rc = PGMR0DynMapInitVM(pVM);
458#endif
459 if (RT_SUCCESS(rc))
460 {
461 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
462#ifdef VBOX_WITH_PCI_PASSTHROUGH
463 rc = PciRawR0InitVM(pVM);
464#endif
465 if (RT_SUCCESS(rc))
466 {
467 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
468 rc = GIMR0InitVM(pVM);
469 if (RT_SUCCESS(rc))
470 {
471#ifdef VBOX_WITH_NEW_APIC
472 rc = APICR0InitVM(pVM);
473#endif
474 if (RT_SUCCESS(rc))
475 {
476 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
477 if (RT_SUCCESS(rc))
478 {
479 GVMMR0DoneInitVM(pVM);
480 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
481 return rc;
482 }
483#ifdef VBOX_WITH_NEW_APIC
484 APICR0TermVM(pVM);
485#endif
486 }
487
488 /* bail out */
489 GIMR0TermVM(pVM);
490 }
491#ifdef VBOX_WITH_PCI_PASSTHROUGH
492 PciRawR0TermVM(pVM);
493#endif
494 }
495 }
496 }
497 HMR0TermVM(pVM);
498 }
499 }
500
501 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
502 return rc;
503}
504
505
506/**
507 * Terminates the R0 bits for a particular VM instance.
508 *
509 * This is normally called by ring-3 as part of the VM termination process, but
510 * may alternatively be called during the support driver session cleanup when
511 * the VM object is destroyed (see GVMM).
512 *
513 * @returns VBox status code.
514 *
515 * @param pVM The cross context VM structure.
516 * @param pGVM Pointer to the global VM structure. Optional.
517 * @thread EMT or session clean up thread.
518 */
519VMMR0_INT_DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
520{
521#ifdef VBOX_WITH_PCI_PASSTHROUGH
522 PciRawR0TermVM(pVM);
523#endif
524
525 /*
526 * Tell GVMM what we're up to and check that we only do this once.
527 */
528 if (GVMMR0DoingTermVM(pVM, pGVM))
529 {
530 GIMR0TermVM(pVM);
531
532 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
533 * here to make sure we don't leak any shared pages if we crash... */
534#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
535 PGMR0DynMapTermVM(pVM);
536#endif
537 HMR0TermVM(pVM);
538 }
539
540 /*
541 * Deregister the logger.
542 */
543 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
544 return VINF_SUCCESS;
545}
546
547
548/**
549 * VMM ring-0 thread-context callback.
550 *
551 * This does common HM state updating and calls the HM-specific thread-context
552 * callback.
553 *
554 * @param enmEvent The thread-context event.
555 * @param pvUser Opaque pointer to the VMCPU.
556 *
557 * @thread EMT(pvUser)
558 */
559static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
560{
561 PVMCPU pVCpu = (PVMCPU)pvUser;
562
563 switch (enmEvent)
564 {
565 case RTTHREADCTXEVENT_IN:
566 {
567 /*
568 * Linux may call us with preemption enabled (really!) but technically we
569 * cannot get preempted here, otherwise we end up in an infinite recursion
570 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
571 * ad infinitum). Let's just disable preemption for now...
572 */
573 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
574 * preemption after doing the callout (one or two functions up the
575 * call chain). */
576 /** @todo r=ramshankar: See @bugref{5313#c30}. */
577 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
578 RTThreadPreemptDisable(&ParanoidPreemptState);
579
580 /* We need to update the VCPU <-> host CPU mapping. */
581 RTCPUID idHostCpu;
582 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
583 pVCpu->iHostCpuSet = iHostCpuSet;
584 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
585
586 /* In the very unlikely event that the GIP delta for the CPU we're
587 rescheduled on needs calculating, try to force a return to ring-3.
588 We unfortunately cannot do the measurements right here. */
589 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
590 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
591
592 /* Invoke the HM-specific thread-context callback. */
593 HMR0ThreadCtxCallback(enmEvent, pvUser);
594
595 /* Restore preemption. */
596 RTThreadPreemptRestore(&ParanoidPreemptState);
597 break;
598 }
599
600 case RTTHREADCTXEVENT_OUT:
601 {
602 /* Invoke the HM-specific thread-context callback. */
603 HMR0ThreadCtxCallback(enmEvent, pvUser);
604
605 /*
606 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
607 * have the same host CPU associated with them.
608 */
609 pVCpu->iHostCpuSet = UINT32_MAX;
610 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
611 break;
612 }
613
614 default:
615 /* Invoke the HM-specific thread-context callback. */
616 HMR0ThreadCtxCallback(enmEvent, pvUser);
617 break;
618 }
619}
620
621
622/**
623 * Creates thread switching hook for the current EMT thread.
624 *
625 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
626 * platform does not implement switcher hooks, no hooks will be created and the
627 * member is set to NIL_RTTHREADCTXHOOK.
628 *
629 * @returns VBox status code.
630 * @param pVCpu The cross context virtual CPU structure.
631 * @thread EMT(pVCpu)
632 */
633VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
634{
635 VMCPU_ASSERT_EMT(pVCpu);
636 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
637
638 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
639 if (RT_SUCCESS(rc))
640 return rc;
641
642 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
643 if (rc == VERR_NOT_SUPPORTED)
644 return VINF_SUCCESS;
645
646 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
647 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
648}
649
650
651/**
652 * Destroys the thread switching hook for the specified VCPU.
653 *
654 * @param pVCpu The cross context virtual CPU structure.
655 * @remarks Can be called from any thread.
656 */
657VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
658{
659 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
660 AssertRC(rc);
661}
662
663
664/**
665 * Disables the thread switching hook for this VCPU (if we got one).
666 *
667 * @param pVCpu The cross context virtual CPU structure.
668 * @thread EMT(pVCpu)
669 *
670 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
671 * this call. This means you have to be careful with what you do!
672 */
673VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
674{
675 /*
676 * Clear the VCPU <-> host CPU mapping as we've left HM context.
677 * @bugref{7726#c19} explains the need for this trick:
678 *
679 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
680 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
681 * longjmp & normal return to ring-3, which opens a window where we may be
682 * rescheduled without changing VMCPUID::idHostCpu and cause confusion if
683 * the CPU starts executing a different EMT. Both functions first disable
684 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
685 * an opening for getting preempted.
686 */
687 /** @todo Make HM not need this API! Then we could leave the hooks enabled
688 * all the time. */
689 /** @todo move this into the context hook disabling if(). */
690 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
691
692 /*
693 * Disable the context hook, if we got one.
694 */
695 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
696 {
697 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
698 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
699 AssertRC(rc);
700 }
701}
702
703
704/**
705 * Internal version of VMMR0ThreadCtxHookIsEnabled.
706 *
707 * @returns true if registered, false otherwise.
708 * @param pVCpu The cross context virtual CPU structure.
709 */
710DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
711{
712 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
713}
714
715
716/**
717 * Whether thread-context hooks are registered for this VCPU.
718 *
719 * @returns true if registered, false otherwise.
720 * @param pVCpu The cross context virtual CPU structure.
721 */
722VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
723{
724 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
725}
726
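/*
 * Rough lifecycle sketch for the thread-context hook managed by the functions
 * above, as driven from the VMMR0_DO_HM_RUN path later in this file (all
 * functions named here appear in this file; the exact teardown call site is
 * not shown here):
 *
 *     VMMR0ThreadCtxHookCreateForEmt(pVCpu);          // from GVMMR0CreateVM / GVMMR0RegisterVCpu
 *     ...
 *     RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook);   // just before HMR0Enter
 *     vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
 *     RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);  // before returning to ring-3
 *     ...
 *     VMMR0ThreadCtxHookDestroyForEmt(pVCpu);         // typically during VM / EMT teardown
 *
 * While the hook is enabled, vmmR0ThreadCtxCallback is invoked each time the
 * EMT thread is scheduled out (RTTHREADCTXEVENT_OUT) or back in
 * (RTTHREADCTXEVENT_IN).
 */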
727
728#ifdef VBOX_WITH_STATISTICS
729/**
730 * Record return code statistics
731 * @param pVM The cross context VM structure.
732 * @param pVCpu The cross context virtual CPU structure.
733 * @param rc The status code.
734 */
735static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
736{
737 /*
738 * Collect statistics.
739 */
740 switch (rc)
741 {
742 case VINF_SUCCESS:
743 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
744 break;
745 case VINF_EM_RAW_INTERRUPT:
746 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
747 break;
748 case VINF_EM_RAW_INTERRUPT_HYPER:
749 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
750 break;
751 case VINF_EM_RAW_GUEST_TRAP:
752 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
753 break;
754 case VINF_EM_RAW_RING_SWITCH:
755 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
756 break;
757 case VINF_EM_RAW_RING_SWITCH_INT:
758 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
759 break;
760 case VINF_EM_RAW_STALE_SELECTOR:
761 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
762 break;
763 case VINF_EM_RAW_IRET_TRAP:
764 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
765 break;
766 case VINF_IOM_R3_IOPORT_READ:
767 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
768 break;
769 case VINF_IOM_R3_IOPORT_WRITE:
770 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
771 break;
772 case VINF_IOM_R3_MMIO_READ:
773 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
774 break;
775 case VINF_IOM_R3_MMIO_WRITE:
776 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
777 break;
778 case VINF_IOM_R3_MMIO_READ_WRITE:
779 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
780 break;
781 case VINF_PATM_HC_MMIO_PATCH_READ:
782 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
783 break;
784 case VINF_PATM_HC_MMIO_PATCH_WRITE:
785 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
786 break;
787 case VINF_CPUM_R3_MSR_READ:
788 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
789 break;
790 case VINF_CPUM_R3_MSR_WRITE:
791 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
792 break;
793 case VINF_EM_RAW_EMULATE_INSTR:
794 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
795 break;
796 case VINF_EM_RAW_EMULATE_IO_BLOCK:
797 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
798 break;
799 case VINF_PATCH_EMULATE_INSTR:
800 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
801 break;
802 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
803 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
804 break;
805 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
806 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
807 break;
808 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
809 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
810 break;
811 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
812 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
813 break;
814 case VINF_CSAM_PENDING_ACTION:
815 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
816 break;
817 case VINF_PGM_SYNC_CR3:
818 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
819 break;
820 case VINF_PATM_PATCH_INT3:
821 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
822 break;
823 case VINF_PATM_PATCH_TRAP_PF:
824 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
825 break;
826 case VINF_PATM_PATCH_TRAP_GP:
827 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
828 break;
829 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
830 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
831 break;
832 case VINF_EM_RESCHEDULE_REM:
833 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
834 break;
835 case VINF_EM_RAW_TO_R3:
836 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
837 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
838 else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
839 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
840 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
841 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
842 else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
843 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
844 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
845 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
846 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
847 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
848 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
849 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
850 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
851 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
852 else
853 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
854 break;
855
856 case VINF_EM_RAW_TIMER_PENDING:
857 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
858 break;
859 case VINF_EM_RAW_INTERRUPT_PENDING:
860 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
861 break;
862 case VINF_VMM_CALL_HOST:
863 switch (pVCpu->vmm.s.enmCallRing3Operation)
864 {
865 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
866 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
867 break;
868 case VMMCALLRING3_PDM_LOCK:
869 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
870 break;
871 case VMMCALLRING3_PGM_POOL_GROW:
872 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
873 break;
874 case VMMCALLRING3_PGM_LOCK:
875 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
876 break;
877 case VMMCALLRING3_PGM_MAP_CHUNK:
878 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
879 break;
880 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
881 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
882 break;
883 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
884 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
885 break;
886 case VMMCALLRING3_VMM_LOGGER_FLUSH:
887 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
888 break;
889 case VMMCALLRING3_VM_SET_ERROR:
890 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
891 break;
892 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
893 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
894 break;
895 case VMMCALLRING3_VM_R0_ASSERTION:
896 default:
897 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
898 break;
899 }
900 break;
901 case VINF_PATM_DUPLICATE_FUNCTION:
902 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
903 break;
904 case VINF_PGM_CHANGE_MODE:
905 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
906 break;
907 case VINF_PGM_POOL_FLUSH_PENDING:
908 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
909 break;
910 case VINF_EM_PENDING_REQUEST:
911 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
912 break;
913 case VINF_EM_HM_PATCH_TPR_INSTR:
914 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
915 break;
916 default:
917 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
918 break;
919 }
920}
921#endif /* VBOX_WITH_STATISTICS */
922
923
924/**
925 * The Ring 0 entry point, called by the fast-ioctl path.
926 *
927 * @param pVM The cross context VM structure.
928 * The return code is stored in pVM->vmm.s.iLastGZRc.
929 * @param idCpu The Virtual CPU ID of the calling EMT.
930 * @param enmOperation Which operation to execute.
931 * @remarks Assume called with interrupts _enabled_.
932 */
933VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
934{
935 /*
936 * Validation.
937 */
938 if (RT_UNLIKELY(idCpu >= pVM->cCpus))
939 return;
940 PVMCPU pVCpu = &pVM->aCpus[idCpu];
941 if (RT_UNLIKELY(pVCpu->hNativeThreadR0 != RTThreadNativeSelf()))
942 return;
943 VMM_CHECK_SMAP_SETUP();
944 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
945
946 /*
947 * Perform requested operation.
948 */
949 switch (enmOperation)
950 {
951 /*
952 * Switch to GC and run guest raw mode code.
953 * Disable interrupts before doing the world switch.
954 */
955 case VMMR0_DO_RAW_RUN:
956 {
957#ifdef VBOX_WITH_RAW_MODE
958# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
959 /* Some safety precautions first. */
960 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
961 {
962 pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
963 break;
964 }
965# endif
966
967 /*
968 * Disable preemption.
969 */
970 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
971 RTThreadPreemptDisable(&PreemptState);
972
973 /*
974 * Get the host CPU identifiers, make sure they are valid and that
975 * we've got a TSC delta for the CPU.
976 */
977 RTCPUID idHostCpu;
978 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
979 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
980 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
981 {
982 /*
983 * Commit the CPU identifiers and update the periodic preemption timer if it's active.
984 */
985# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
986 CPUMR0SetLApic(pVCpu, iHostCpuSet);
987# endif
988 pVCpu->iHostCpuSet = iHostCpuSet;
989 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
990
991 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
992 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
993
994 /*
995 * We might need to disable VT-x if the active switcher turns off paging.
996 */
997 bool fVTxDisabled;
998 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
999 if (RT_SUCCESS(rc))
1000 {
1001 /*
1002 * Disable interrupts and run raw-mode code. The loop is for efficiently
1003 * dispatching tracepoints that fired in raw-mode context.
1004 */
1005 RTCCUINTREG uFlags = ASMIntDisableFlags();
1006
1007 for (;;)
1008 {
1009 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
1010 TMNotifyStartOfExecution(pVCpu);
1011
1012 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1013 pVCpu->vmm.s.iLastGZRc = rc;
1014
1015 TMNotifyEndOfExecution(pVCpu);
1016 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1017
1018 if (rc != VINF_VMM_CALL_TRACER)
1019 break;
1020 SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
1021 }
1022
1023 /*
1024 * Re-enable VT-x before we dispatch any pending host interrupts and
1025 * re-enable interrupts.
1026 */
1027 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1028
1029 if ( rc == VINF_EM_RAW_INTERRUPT
1030 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1031 TRPMR0DispatchHostInterrupt(pVM);
1032
1033 ASMSetFlags(uFlags);
1034
1035 /* Fire dtrace probe and collect statistics. */
1036 VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1037# ifdef VBOX_WITH_STATISTICS
1038 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1039 vmmR0RecordRC(pVM, pVCpu, rc);
1040# endif
1041 }
1042 else
1043 pVCpu->vmm.s.iLastGZRc = rc;
1044
1045 /*
1046 * Invalidate the host CPU identifiers as we restore preemption.
1047 */
1048 pVCpu->iHostCpuSet = UINT32_MAX;
1049 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1050
1051 RTThreadPreemptRestore(&PreemptState);
1052 }
1053 /*
1054 * Invalid CPU set index or TSC delta in need of measuring.
1055 */
1056 else
1057 {
1058 RTThreadPreemptRestore(&PreemptState);
1059 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1060 {
1061 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1062 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1063 0 /*default cTries*/);
1064 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1065 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1066 else
1067 pVCpu->vmm.s.iLastGZRc = rc;
1068 }
1069 else
1070 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1071 }
1072
1073#else /* !VBOX_WITH_RAW_MODE */
1074 pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_NOT_SUPPORTED;
1075#endif
1076 break;
1077 }
1078
1079 /*
1080 * Run guest code using the available hardware acceleration technology.
1081 */
1082 case VMMR0_DO_HM_RUN:
1083 {
1084 /*
1085 * Disable preemption.
1086 */
1087 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1088 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1089 RTThreadPreemptDisable(&PreemptState);
1090
1091 /*
1092 * Get the host CPU identifiers, make sure they are valid and that
1093 * we've got a TSC delta for the CPU.
1094 */
1095 RTCPUID idHostCpu;
1096 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1097 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1098 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1099 {
1100 pVCpu->iHostCpuSet = iHostCpuSet;
1101 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1102
1103 /*
1104 * Update the periodic preemption timer if it's active.
1105 */
1106 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1107 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1108 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1109
1110#ifdef LOG_ENABLED
1111 /*
1112 * Ugly: Lazy registration of ring 0 loggers.
1113 */
1114 if (pVCpu->idCpu > 0)
1115 {
1116 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
1117 if ( pR0Logger
1118 && RT_UNLIKELY(!pR0Logger->fRegistered))
1119 {
1120 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
1121 pR0Logger->fRegistered = true;
1122 }
1123 }
1124#endif
1125
1126 int rc;
1127 bool fPreemptRestored = false;
1128 if (!HMR0SuspendPending())
1129 {
1130 /*
1131 * Enable the context switching hook.
1132 */
1133 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1134 {
1135 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1136 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1137 }
1138
1139 /*
1140 * Enter HM context.
1141 */
1142 rc = HMR0Enter(pVM, pVCpu);
1143 if (RT_SUCCESS(rc))
1144 {
1145 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1146
1147 /*
1148 * When preemption hooks are in place, enable preemption now that
1149 * we're in HM context.
1150 */
1151 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1152 {
1153 fPreemptRestored = true;
1154 RTThreadPreemptRestore(&PreemptState);
1155 }
1156
1157 /*
1158 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1159 */
1160 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1161 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1162 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1163
1164 /*
1165 * Assert sanity on the way out. Using manual assertion code here as normal
1166 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1167 */
1168 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1169 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1170 {
1171 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1172 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1173 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1174 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1175 }
1176 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1177 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1178 {
1179 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1180 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1181 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1182 rc = VERR_INVALID_STATE;
1183 }
1184
1185 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1186 }
1187 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1188
1189 /*
1190 * Invalidate the host CPU identifiers before we disable the context
1191 * hook / restore preemption.
1192 */
1193 pVCpu->iHostCpuSet = UINT32_MAX;
1194 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1195
1196 /*
1197 * Disable context hooks. Due to unresolved cleanup issues, we
1198 * cannot leave the hooks enabled when we return to ring-3.
1199 *
1200 * Note! At the moment HM may also have disabled the hook
1201 * when we get here, but the IPRT API handles that.
1202 */
1203 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1204 {
1205 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1206 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1207 }
1208 }
1209 /*
1210 * The system is about to go into suspend mode; go back to ring 3.
1211 */
1212 else
1213 {
1214 rc = VINF_EM_RAW_INTERRUPT;
1215 pVCpu->iHostCpuSet = UINT32_MAX;
1216 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1217 }
1218
1219 /** @todo When HM stops messing with the context hook state, we'll disable
1220 * preemption again before the RTThreadCtxHookDisable call. */
1221 if (!fPreemptRestored)
1222 RTThreadPreemptRestore(&PreemptState);
1223
1224 pVCpu->vmm.s.iLastGZRc = rc;
1225
1226 /* Fire dtrace probe and collect statistics. */
1227 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1228#ifdef VBOX_WITH_STATISTICS
1229 vmmR0RecordRC(pVM, pVCpu, rc);
1230#endif
1231 }
1232 /*
1233 * Invalid CPU set index or TSC delta in need of measuring.
1234 */
1235 else
1236 {
1237 pVCpu->iHostCpuSet = UINT32_MAX;
1238 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1239 RTThreadPreemptRestore(&PreemptState);
1240 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1241 {
1242 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1243 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1244 0 /*default cTries*/);
1245 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1246 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1247 else
1248 pVCpu->vmm.s.iLastGZRc = rc;
1249 }
1250 else
1251 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1252 }
1253 break;
1254 }
1255
1256 /*
1257 * For profiling.
1258 */
1259 case VMMR0_DO_NOP:
1260 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1261 break;
1262
1263 /*
1264 * Impossible.
1265 */
1266 default:
1267 AssertMsgFailed(("%#x\n", enmOperation));
1268 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1269 break;
1270 }
1271 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1272}
1273
1274
1275/**
1276 * Validates a session or VM session argument.
1277 *
1278 * @returns true / false accordingly.
1279 * @param pVM The cross context VM structure.
1280 * @param pClaimedSession The session claim to validate.
1281 * @param pSession The session argument.
1282 */
1283DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1284{
1285 /* This must be set! */
1286 if (!pSession)
1287 return false;
1288
1289 /* Only one out of the two. */
1290 if (pVM && pClaimedSession)
1291 return false;
1292 if (pVM)
1293 pClaimedSession = pVM->pSession;
1294 return pClaimedSession == pSession;
1295}
1296
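/*
 * A few representative inputs for vmmR0IsValidSession above, spelling out the
 * rule it enforces (illustrative only, derived directly from the function body;
 * pOther stands for any session other than pVM->pSession):
 *
 *     vmmR0IsValidSession(NULL, pSession, pSession)      -> true   (no VM; sessions match)
 *     vmmR0IsValidSession(pVM,  NULL,     pVM->pSession) -> true   (claim taken from the VM)
 *     vmmR0IsValidSession(pVM,  pSession, pSession)      -> false  (pVM and pClaimedSession are mutually exclusive)
 *     vmmR0IsValidSession(pVM,  NULL,     pOther)        -> false  (caller's session does not own the VM)
 *     vmmR0IsValidSession(pVM,  NULL,     NULL)          -> false  (the caller session must always be present)
 */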
1297
1298/**
1299 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1300 * called through a longjmp so we can exit safely on failure.
1301 *
1302 * @returns VBox status code.
1303 * @param pVM The cross context VM structure.
1304 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1305 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1306 * @param enmOperation Which operation to execute.
1307 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1308 * The support driver validates this if it's present.
1309 * @param u64Arg Some simple constant argument.
1310 * @param pSession The session of the caller.
1311 * @remarks Assume called with interrupts _enabled_.
1312 */
1313static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1314{
1315 /*
1316 * Common VM pointer validation.
1317 */
1318 if (pVM)
1319 {
1320 if (RT_UNLIKELY( !VALID_PTR(pVM)
1321 || ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
1322 {
1323 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
1324 return VERR_INVALID_POINTER;
1325 }
1326 if (RT_UNLIKELY( pVM->enmVMState < VMSTATE_CREATING
1327 || pVM->enmVMState > VMSTATE_TERMINATED
1328 || pVM->pVMR0 != pVM))
1329 {
1330 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
1331 pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
1332 return VERR_INVALID_POINTER;
1333 }
1334
1335 if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
1336 {
1337 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
1338 return VERR_INVALID_PARAMETER;
1339 }
1340 }
1341 else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
1342 {
1343 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1344 return VERR_INVALID_PARAMETER;
1345 }
1346 VMM_CHECK_SMAP_SETUP();
1347 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1348 int rc;
1349
1350 switch (enmOperation)
1351 {
1352 /*
1353 * GVM requests
1354 */
1355 case VMMR0_DO_GVMM_CREATE_VM:
1356 if (pVM || u64Arg || idCpu != NIL_VMCPUID)
1357 return VERR_INVALID_PARAMETER;
1358 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);
1359 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1360 break;
1361
1362 case VMMR0_DO_GVMM_DESTROY_VM:
1363 if (pReqHdr || u64Arg)
1364 return VERR_INVALID_PARAMETER;
1365 rc = GVMMR0DestroyVM(pVM);
1366 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1367 break;
1368
1369 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1370 {
1371 if (!pVM)
1372 return VERR_INVALID_PARAMETER;
1373 rc = GVMMR0RegisterVCpu(pVM, idCpu);
1374 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1375 break;
1376 }
1377
1378 case VMMR0_DO_GVMM_SCHED_HALT:
1379 if (pReqHdr)
1380 return VERR_INVALID_PARAMETER;
1381 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1382 rc = GVMMR0SchedHalt(pVM, idCpu, u64Arg);
1383 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1384 break;
1385
1386 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1387 if (pReqHdr || u64Arg)
1388 return VERR_INVALID_PARAMETER;
1389 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1390 rc = GVMMR0SchedWakeUp(pVM, idCpu);
1391 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1392 break;
1393
1394 case VMMR0_DO_GVMM_SCHED_POKE:
1395 if (pReqHdr || u64Arg)
1396 return VERR_INVALID_PARAMETER;
1397 rc = GVMMR0SchedPoke(pVM, idCpu);
1398 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1399 break;
1400
1401 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1402 if (u64Arg)
1403 return VERR_INVALID_PARAMETER;
1404 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1405 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1406 break;
1407
1408 case VMMR0_DO_GVMM_SCHED_POLL:
1409 if (pReqHdr || u64Arg > 1)
1410 return VERR_INVALID_PARAMETER;
1411 rc = GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);
1412 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1413 break;
1414
1415 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1416 if (u64Arg)
1417 return VERR_INVALID_PARAMETER;
1418 rc = GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);
1419 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1420 break;
1421
1422 case VMMR0_DO_GVMM_RESET_STATISTICS:
1423 if (u64Arg)
1424 return VERR_INVALID_PARAMETER;
1425 rc = GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);
1426 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1427 break;
1428
1429 /*
1430 * Initialize the R0 part of a VM instance.
1431 */
1432 case VMMR0_DO_VMMR0_INIT:
1433 rc = vmmR0InitVM(pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1434 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1435 break;
1436
1437 /*
1438 * Terminate the R0 part of a VM instance.
1439 */
1440 case VMMR0_DO_VMMR0_TERM:
1441 rc = VMMR0TermVM(pVM, NULL);
1442 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1443 break;
1444
1445 /*
1446 * Attempt to enable HM mode and check the current setting.
1447 */
1448 case VMMR0_DO_HM_ENABLE:
1449 rc = HMR0EnableAllCpus(pVM);
1450 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1451 break;
1452
1453 /*
1454 * Set up the hardware-accelerated session.
1455 */
1456 case VMMR0_DO_HM_SETUP_VM:
1457 rc = HMR0SetupVM(pVM);
1458 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1459 break;
1460
1461 /*
1462 * Switch to RC to execute Hypervisor function.
1463 */
1464 case VMMR0_DO_CALL_HYPERVISOR:
1465 {
1466#ifdef VBOX_WITH_RAW_MODE
1467 /*
1468 * Validate input / context.
1469 */
1470 if (RT_UNLIKELY(idCpu != 0))
1471 return VERR_INVALID_CPU_ID;
1472 if (RT_UNLIKELY(pVM->cCpus != 1))
1473 return VERR_INVALID_PARAMETER;
1474 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1475# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1476 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1477 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1478# endif
1479
1480 /*
1481 * Disable interrupts.
1482 */
1483 RTCCUINTREG fFlags = ASMIntDisableFlags();
1484
1485 /*
1486 * Get the host CPU identifiers, make sure they are valid and that
1487 * we've got a TSC delta for the CPU.
1488 */
1489 RTCPUID idHostCpu;
1490 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1491 if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
1492 {
1493 ASMSetFlags(fFlags);
1494 return VERR_INVALID_CPU_INDEX;
1495 }
1496 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1497 {
1498 ASMSetFlags(fFlags);
1499 rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1500 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1501 0 /*default cTries*/);
1502 if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
1503 {
1504 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1505 return rc;
1506 }
1507 }
1508
1509 /*
1510 * Commit the CPU identifiers.
1511 */
1512# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1513 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1514# endif
1515 pVCpu->iHostCpuSet = iHostCpuSet;
1516 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1517
1518 /*
1519 * We might need to disable VT-x if the active switcher turns off paging.
1520 */
1521 bool fVTxDisabled;
1522 rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1523 if (RT_SUCCESS(rc))
1524 {
1525 /*
1526 * Go through the wormhole...
1527 */
1528 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1529
1530 /*
1531 * Re-enable VT-x before we dispatch any pending host interrupts.
1532 */
1533 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1534
1535 if ( rc == VINF_EM_RAW_INTERRUPT
1536 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1537 TRPMR0DispatchHostInterrupt(pVM);
1538 }
1539
1540 /*
1541 * Invalidate the host CPU identifiers as we restore interrupts.
1542 */
1543 pVCpu->iHostCpuSet = UINT32_MAX;
1544 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1545 ASMSetFlags(fFlags);
1546
1547#else /* !VBOX_WITH_RAW_MODE */
1548 rc = VERR_RAW_MODE_NOT_SUPPORTED;
1549#endif
1550 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1551 break;
1552 }
1553
1554 /*
1555 * PGM wrappers.
1556 */
1557 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1558 if (idCpu == NIL_VMCPUID)
1559 return VERR_INVALID_CPU_ID;
1560 rc = PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);
1561 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1562 break;
1563
1564 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1565 if (idCpu == NIL_VMCPUID)
1566 return VERR_INVALID_CPU_ID;
1567 rc = PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu]);
1568 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1569 break;
1570
1571 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1572 if (idCpu == NIL_VMCPUID)
1573 return VERR_INVALID_CPU_ID;
1574 rc = PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);
1575 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1576 break;
1577
1578 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1579 if (idCpu != 0)
1580 return VERR_INVALID_CPU_ID;
1581 rc = PGMR0PhysSetupIommu(pVM);
1582 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1583 break;
1584
1585 /*
1586 * GMM wrappers.
1587 */
1588 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1589 if (u64Arg)
1590 return VERR_INVALID_PARAMETER;
1591 rc = GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1592 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1593 break;
1594
1595 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1596 if (u64Arg)
1597 return VERR_INVALID_PARAMETER;
1598 rc = GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1599 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1600 break;
1601
1602 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1603 if (u64Arg)
1604 return VERR_INVALID_PARAMETER;
1605 rc = GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1606 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1607 break;
1608
1609 case VMMR0_DO_GMM_FREE_PAGES:
1610 if (u64Arg)
1611 return VERR_INVALID_PARAMETER;
1612 rc = GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1613 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1614 break;
1615
1616 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1617 if (u64Arg)
1618 return VERR_INVALID_PARAMETER;
1619 rc = GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1620 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1621 break;
1622
1623 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1624 if (u64Arg)
1625 return VERR_INVALID_PARAMETER;
1626 rc = GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);
1627 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1628 break;
1629
1630 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1631 if (idCpu == NIL_VMCPUID)
1632 return VERR_INVALID_CPU_ID;
1633 if (u64Arg)
1634 return VERR_INVALID_PARAMETER;
1635 rc = GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1636 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1637 break;
1638
1639 case VMMR0_DO_GMM_BALLOONED_PAGES:
1640 if (u64Arg)
1641 return VERR_INVALID_PARAMETER;
1642 rc = GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1643 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1644 break;
1645
1646 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1647 if (u64Arg)
1648 return VERR_INVALID_PARAMETER;
1649 rc = GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1650 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1651 break;
1652
1653 case VMMR0_DO_GMM_SEED_CHUNK:
1654 if (pReqHdr)
1655 return VERR_INVALID_PARAMETER;
1656 rc = GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);
1657 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1658 break;
1659
1660 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1661 if (idCpu == NIL_VMCPUID)
1662 return VERR_INVALID_CPU_ID;
1663 if (u64Arg)
1664 return VERR_INVALID_PARAMETER;
1665 rc = GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1666 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1667 break;
1668
1669 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1670 if (idCpu == NIL_VMCPUID)
1671 return VERR_INVALID_CPU_ID;
1672 if (u64Arg)
1673 return VERR_INVALID_PARAMETER;
1674 rc = GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1675 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1676 break;
1677
1678 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1679 if (idCpu == NIL_VMCPUID)
1680 return VERR_INVALID_CPU_ID;
1681 if ( u64Arg
1682 || pReqHdr)
1683 return VERR_INVALID_PARAMETER;
1684 rc = GMMR0ResetSharedModules(pVM, idCpu);
1685 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1686 break;
1687
1688#ifdef VBOX_WITH_PAGE_SHARING
1689 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1690 {
1691 if (idCpu == NIL_VMCPUID)
1692 return VERR_INVALID_CPU_ID;
1693 if ( u64Arg
1694 || pReqHdr)
1695 return VERR_INVALID_PARAMETER;
1696
1697 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1698 Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());
1699
1700# ifdef DEBUG_sandervl
1701 /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
1702 /* Todo: this can have bad side effects for unexpected jumps back to r3. */
1703 rc = GMMR0CheckSharedModulesStart(pVM);
1704 if (rc == VINF_SUCCESS)
1705 {
1706 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
1707 Assert( rc == VINF_SUCCESS
1708 || (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
1709 GMMR0CheckSharedModulesEnd(pVM);
1710 }
1711# else
1712 rc = GMMR0CheckSharedModules(pVM, pVCpu);
1713# endif
1714 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1715 break;
1716 }
1717#endif
1718
1719#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1720 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1721 if (u64Arg)
1722 return VERR_INVALID_PARAMETER;
1723 rc = GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1724 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1725 break;
1726#endif
1727
1728 case VMMR0_DO_GMM_QUERY_STATISTICS:
1729 if (u64Arg)
1730 return VERR_INVALID_PARAMETER;
1731 rc = GMMR0QueryStatisticsReq(pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1732 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1733 break;
1734
1735 case VMMR0_DO_GMM_RESET_STATISTICS:
1736 if (u64Arg)
1737 return VERR_INVALID_PARAMETER;
1738 rc = GMMR0ResetStatisticsReq(pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1739 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1740 break;
1741
1742 /*
1743 * A quick GCFGM mock-up.
1744 */
1745 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1746 case VMMR0_DO_GCFGM_SET_VALUE:
1747 case VMMR0_DO_GCFGM_QUERY_VALUE:
1748 {
1749 if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1750 return VERR_INVALID_PARAMETER;
1751 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1752 if (pReq->Hdr.cbReq != sizeof(*pReq))
1753 return VERR_INVALID_PARAMETER;
1754 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1755 {
1756 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1757 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1758 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1759 }
1760 else
1761 {
1762 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1763 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1764 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1765 }
1766 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1767 break;
1768 }
1769
1770 /*
1771 * PDM Wrappers.
1772 */
1773 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1774 {
1775 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1776 return VERR_INVALID_PARAMETER;
1777 rc = PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1778 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1779 break;
1780 }
1781
1782 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1783 {
1784 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1785 return VERR_INVALID_PARAMETER;
1786 rc = PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1787 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1788 break;
1789 }
1790
1791 /*
1792 * Requests to the internal networking service.
1793 */
1794 case VMMR0_DO_INTNET_OPEN:
1795 {
1796 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
1797 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
1798 return VERR_INVALID_PARAMETER;
1799 rc = IntNetR0OpenReq(pSession, pReq);
1800 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1801 break;
1802 }
1803
1804 case VMMR0_DO_INTNET_IF_CLOSE:
1805 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1806 return VERR_INVALID_PARAMETER;
1807 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
1808 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1809 break;
1810
1811
1812 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
1813 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1814 return VERR_INVALID_PARAMETER;
1815 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
1816 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1817 break;
1818
1819 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
1820 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1821 return VERR_INVALID_PARAMETER;
1822 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
1823 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1824 break;
1825
1826 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
1827 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1828 return VERR_INVALID_PARAMETER;
1829 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
1830 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1831 break;
1832
1833 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
1834 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1835 return VERR_INVALID_PARAMETER;
1836 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
1837 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1838 break;
1839
1840 case VMMR0_DO_INTNET_IF_SEND:
1841 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1842 return VERR_INVALID_PARAMETER;
1843 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
1844 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1845 break;
1846
1847 case VMMR0_DO_INTNET_IF_WAIT:
1848 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1849 return VERR_INVALID_PARAMETER;
1850 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
1851 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1852 break;
1853
1854 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
1855 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1856 return VERR_INVALID_PARAMETER;
1857 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
1858 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1859 break;
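        /*
         * Note (illustrative): every VMMR0_DO_INTNET_* case above repeats the same
         * validation recipe.  A hypothetical helper macro (not something this file
         * defines) capturing that pattern would look roughly like this:
         *
         *     #define VMMR0_VALIDATE_INTNET_REQ(a_ReqType) \
         *         do { \
         *             if (   u64Arg \
         *                 || !pReqHdr \
         *                 || !vmmR0IsValidSession(pVM, ((a_ReqType)pReqHdr)->pSession, pSession) \
         *                 || idCpu != NIL_VMCPUID) \
         *                 return VERR_INVALID_PARAMETER; \
         *         } while (0)
         */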
1860
1861#ifdef VBOX_WITH_PCI_PASSTHROUGH
1862 /*
1863         * Requests to the host PCI driver service.
1864 */
1865 case VMMR0_DO_PCIRAW_REQ:
1866 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1867 return VERR_INVALID_PARAMETER;
1868 rc = PciRawR0ProcessReq(pSession, pVM, (PPCIRAWSENDREQ)pReqHdr);
1869 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1870 break;
1871#endif
1872 /*
1873 * For profiling.
1874 */
1875 case VMMR0_DO_NOP:
1876 case VMMR0_DO_SLOW_NOP:
1877 return VINF_SUCCESS;
1878
1879 /*
1880 * For testing Ring-0 APIs invoked in this environment.
1881 */
1882 case VMMR0_DO_TESTS:
1883 /** @todo make new test */
1884 return VINF_SUCCESS;
1885
1886
1887#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
1888 case VMMR0_DO_TEST_SWITCHER3264:
1889 if (idCpu == NIL_VMCPUID)
1890 return VERR_INVALID_CPU_ID;
1891 rc = HMR0TestSwitcher3264(pVM);
1892 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1893 break;
1894#endif
1895 default:
1896 /*
1897         * We return VERR_NOT_SUPPORTED here so the caller gets something other
1898         * than the -1 which the interrupt gate glue code might return.
1899 */
1900 Log(("operation %#x is not supported\n", enmOperation));
1901 return VERR_NOT_SUPPORTED;
1902 }
1903 return rc;
1904}
1905
1906
1907/**
1908 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
1909 */
1910typedef struct VMMR0ENTRYEXARGS
1911{
1912 PVM pVM;
1913 VMCPUID idCpu;
1914 VMMR0OPERATION enmOperation;
1915 PSUPVMMR0REQHDR pReq;
1916 uint64_t u64Arg;
1917 PSUPDRVSESSION pSession;
1918} VMMR0ENTRYEXARGS;
1919/** Pointer to a vmmR0EntryExWrapper argument package. */
1920typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
1921
1922/**
1923 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
1924 *
1925 * @returns VBox status code.
1926 * @param pvArgs The argument package
1927 */
1928static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
1929{
1930 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
1931 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
1932 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
1933 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
1934 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
1935 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
1936}
1937
1938
1939/**
1940 * The Ring 0 entry point, called by the support library (SUP).
1941 *
1942 * @returns VBox status code.
1943 * @param pVM The cross context VM structure.
1944 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1945 *                      is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1946 * @param enmOperation Which operation to execute.
1947 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
1948 * @param u64Arg Some simple constant argument.
1949 * @param pSession The session of the caller.
1950 * @remarks Assume called with interrupts _enabled_.
1951 */
1952VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
1953{
1954 /*
1955 * Requests that should only happen on the EMT thread will be
1956 * wrapped in a setjmp so we can assert without causing trouble.
1957 */
1958 if ( VALID_PTR(pVM)
1959 && pVM->pVMR0
1960 && idCpu < pVM->cCpus)
1961 {
1962 switch (enmOperation)
1963 {
1964 /* These might/will be called before VMMR3Init. */
1965 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1966 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1967 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1968 case VMMR0_DO_GMM_FREE_PAGES:
1969 case VMMR0_DO_GMM_BALLOONED_PAGES:
1970            /* On the Mac we might not have a valid jmp buf, so check these as well. */
1971 case VMMR0_DO_VMMR0_INIT:
1972 case VMMR0_DO_VMMR0_TERM:
1973 {
1974 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1975
1976 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
1977 break;
1978
1979 /** @todo validate this EMT claim... GVM knows. */
1980 VMMR0ENTRYEXARGS Args;
1981 Args.pVM = pVM;
1982 Args.idCpu = idCpu;
1983 Args.enmOperation = enmOperation;
1984 Args.pReq = pReq;
1985 Args.u64Arg = u64Arg;
1986 Args.pSession = pSession;
1987 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
1988 }
1989
1990 default:
1991 break;
1992 }
1993 }
1994 return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
1995}
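/*
 * Illustrative only: a hedged sketch of how VMMR0EntryEx is normally reached from
 * ring-3.  This is not code from this file; it assumes the SUPR3CallVMMR0Ex API
 * declared in VBox/sup.h and uses the profiling NOP operation handled above, which
 * needs neither a request packet nor an argument:
 *
 *     int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_NOP, 0, NULL);
 *
 * Operations bound to a virtual CPU pass that CPU's id instead of NIL_VMCPUID.
 */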
1996
1997
1998/**
1999 * Checks whether we've armed the ring-0 long jump machinery.
2000 *
2001 * @returns @c true / @c false
2002 * @param pVCpu The cross context virtual CPU structure.
2003 * @thread EMT
2004 * @sa VMMIsLongJumpArmed
2005 */
2006VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
2007{
2008#ifdef RT_ARCH_X86
2009 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2010 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2011#else
2012 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2013 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2014#endif
2015}
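/*
 * Illustrative only: the typical guard pattern around a ring-3 detour, sketched
 * with calls that appear elsewhere in this file (this is not an actual call site):
 *
 *     if (VMMR0IsLongJumpArmed(pVCpu))
 *         rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
 */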
2016
2017
2018/**
2019 * Checks whether we've done a ring-3 long jump.
2020 *
2021 * @returns @c true / @c false
2022 * @param pVCpu The cross context virtual CPU structure.
2023 * @thread EMT
2024 */
2025VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
2026{
2027 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2028}
2029
2030
2031/**
2032 * Internal R0 logger worker: Flush logger.
2033 *
2034 * @param pLogger The logger instance to flush.
2035 * @remark This function must be exported!
2036 */
2037VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2038{
2039#ifdef LOG_ENABLED
2040 /*
2041 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2042     * (This code is a bit paranoid.)
2043 */
2044 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2045 if ( !VALID_PTR(pR0Logger)
2046 || !VALID_PTR(pR0Logger + 1)
2047 || pLogger->u32Magic != RTLOGGER_MAGIC)
2048 {
2049# ifdef DEBUG
2050 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2051# endif
2052 return;
2053 }
2054 if (pR0Logger->fFlushingDisabled)
2055 return; /* quietly */
2056
2057 PVM pVM = pR0Logger->pVM;
2058 if ( !VALID_PTR(pVM)
2059 || pVM->pVMR0 != pVM)
2060 {
2061# ifdef DEBUG
2062 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
2063# endif
2064 return;
2065 }
2066
2067 PVMCPU pVCpu = VMMGetCpu(pVM);
2068 if (pVCpu)
2069 {
2070 /*
2071 * Check that the jump buffer is armed.
2072 */
2073# ifdef RT_ARCH_X86
2074 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2075 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2076# else
2077 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2078 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2079# endif
2080 {
2081# ifdef DEBUG
2082 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2083# endif
2084 return;
2085 }
2086 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2087 }
2088# ifdef DEBUG
2089 else
2090 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2091# endif
2092#else
2093 NOREF(pLogger);
2094#endif /* LOG_ENABLED */
2095}
2096
2097/**
2098 * Internal R0 logger worker: Custom prefix.
2099 *
2100 * @returns Number of chars written.
2101 *
2102 * @param pLogger The logger instance.
2103 * @param pchBuf The output buffer.
2104 * @param cchBuf The size of the buffer.
2105 * @param pvUser User argument (ignored).
2106 */
2107VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
2108{
2109 NOREF(pvUser);
2110#ifdef LOG_ENABLED
2111 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2112 if ( !VALID_PTR(pR0Logger)
2113 || !VALID_PTR(pR0Logger + 1)
2114 || pLogger->u32Magic != RTLOGGER_MAGIC
2115 || cchBuf < 2)
2116 return 0;
2117
2118 static const char s_szHex[17] = "0123456789abcdef";
2119 VMCPUID const idCpu = pR0Logger->idCpu;
2120 pchBuf[1] = s_szHex[ idCpu & 15];
2121 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
2122
2123 return 2;
2124#else
2125 NOREF(pLogger); NOREF(pchBuf); NOREF(cchBuf);
2126 return 0;
2127#endif
2128}
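/*
 * Example (illustrative): for virtual CPU id 0x1f the prefix callback above emits
 * the two characters "1f", so each flushed ring-0 log line starts with the hex id
 * of the EMT that produced it.
 */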
2129
2130#ifdef LOG_ENABLED
2131
2132/**
2133 * Disables flushing of the ring-0 debug log.
2134 *
2135 * @param pVCpu The cross context virtual CPU structure.
2136 */
2137VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
2138{
2139 if (pVCpu->vmm.s.pR0LoggerR0)
2140 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2141}
2142
2143
2144/**
2145 * Enables flushing of the ring-0 debug log.
2146 *
2147 * @param pVCpu The cross context virtual CPU structure.
2148 */
2149VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
2150{
2151 if (pVCpu->vmm.s.pR0LoggerR0)
2152 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2153}
2154
2155
2156/**
2157 * Checks whether flushing of the ring-0 debug log is disabled (also true when no ring-0 logger is installed).
2158 *
2159 * @param pVCpu The cross context virtual CPU structure.
2160 */
2161VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
2162{
2163 if (pVCpu->vmm.s.pR0LoggerR0)
2164 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2165 return true;
2166}
2167#endif /* LOG_ENABLED */
2168
2169/**
2170 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2171 *
2172 * @returns true if the breakpoint should be hit, false if it should be ignored.
2173 */
2174DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2175{
2176#if 0
2177 return true;
2178#else
2179 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2180 if (pVM)
2181 {
2182 PVMCPU pVCpu = VMMGetCpu(pVM);
2183
2184 if (pVCpu)
2185 {
2186#ifdef RT_ARCH_X86
2187 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2188 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2189#else
2190 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2191 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2192#endif
2193 {
2194 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2195 return RT_FAILURE_NP(rc);
2196 }
2197 }
2198 }
2199#ifdef RT_OS_LINUX
2200 return true;
2201#else
2202 return false;
2203#endif
2204#endif
2205}
2206
2207
2208/**
2209 * Override this so we can push it up to ring-3.
2210 *
2211 * @param pszExpr Expression. Can be NULL.
2212 * @param uLine Location line number.
2213 * @param pszFile Location file name.
2214 * @param pszFunction Location function name.
2215 */
2216DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2217{
2218 /*
2219 * To the log.
2220 */
2221 LogAlways(("\n!!R0-Assertion Failed!!\n"
2222 "Expression: %s\n"
2223 "Location : %s(%d) %s\n",
2224 pszExpr, pszFile, uLine, pszFunction));
2225
2226 /*
2227 * To the global VMM buffer.
2228 */
2229 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2230 if (pVM)
2231 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2232 "\n!!R0-Assertion Failed!!\n"
2233 "Expression: %s\n"
2234 "Location : %s(%d) %s\n",
2235 pszExpr, pszFile, uLine, pszFunction);
2236
2237 /*
2238 * Continue the normal way.
2239 */
2240 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2241}
2242
2243
2244/**
2245 * Callback for RTLogFormatV which writes to the ring-3 log port.
2246 * See PFNLOGOUTPUT() for details.
2247 */
2248static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2249{
2250 for (size_t i = 0; i < cbChars; i++)
2251 {
2252 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2253 }
2254
2255 NOREF(pv);
2256 return cbChars;
2257}
2258
2259
2260/**
2261 * Override this so we can push it up to ring-3.
2262 *
2263 * @param pszFormat The format string.
2264 * @param va Arguments.
2265 */
2266DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2267{
2268 va_list vaCopy;
2269
2270 /*
2271 * Push the message to the loggers.
2272 */
2273 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2274 if (pLog)
2275 {
2276 va_copy(vaCopy, va);
2277 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2278 va_end(vaCopy);
2279 }
2280 pLog = RTLogRelGetDefaultInstance();
2281 if (pLog)
2282 {
2283 va_copy(vaCopy, va);
2284 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2285 va_end(vaCopy);
2286 }
2287
2288 /*
2289 * Push it to the global VMM buffer.
2290 */
2291 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2292 if (pVM)
2293 {
2294 va_copy(vaCopy, va);
2295 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2296 va_end(vaCopy);
2297 }
2298
2299 /*
2300 * Continue the normal way.
2301 */
2302 RTAssertMsg2V(pszFormat, va);
2303}
2304