VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@57857

Last change on this file since 57857 was 57856, checked in by vboxsync, 9 years ago

VMM: warnings.

1/* $Id: VMMR0.cpp 57856 2015-09-22 14:26:11Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/stam.h>
30#include <VBox/vmm/tm.h>
31#include "VMMInternal.h"
32#include <VBox/vmm/vm.h>
33#ifdef VBOX_WITH_PCI_PASSTHROUGH
34# include <VBox/vmm/pdmpci.h>
35#endif
36
37#include <VBox/vmm/gvmm.h>
38#include <VBox/vmm/gmm.h>
39#include <VBox/vmm/gim.h>
40#include <VBox/intnet.h>
41#include <VBox/vmm/hm.h>
42#include <VBox/param.h>
43#include <VBox/err.h>
44#include <VBox/version.h>
45#include <VBox/log.h>
46
47#include <iprt/asm-amd64-x86.h>
48#include <iprt/assert.h>
49#include <iprt/crc.h>
50#include <iprt/mp.h>
51#include <iprt/once.h>
52#include <iprt/stdarg.h>
53#include <iprt/string.h>
54#include <iprt/thread.h>
55#include <iprt/timer.h>
56
57#include "dtrace/VBoxVMM.h"
58
59
60#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
61# pragma intrinsic(_AddressOfReturnAddress)
62#endif
63
64#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
65# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
66#endif
67
68
69
70/*********************************************************************************************************************************
71* Defined Constants And Macros *
72*********************************************************************************************************************************/
73/** @def VMM_CHECK_SMAP_SETUP
74 * SMAP check setup. */
75/** @def VMM_CHECK_SMAP_CHECK
76 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
77 * it will be logged and @a a_BadExpr is executed. */
78/** @def VMM_CHECK_SMAP_CHECK2
79 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
80 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
81 * executed. */
82#if defined(VBOX_STRICT) || 1
83# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
84# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
85 do { \
86 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
87 { \
88 RTCCUINTREG fEflCheck = ASMGetFlags(); \
89 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
90 { /* likely */ } \
91 else \
92 { \
93 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
94 a_BadExpr; \
95 } \
96 } \
97 } while (0)
98# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
99 do { \
100 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
101 { \
102 RTCCUINTREG fEflCheck = ASMGetFlags(); \
103 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
104 { /* likely */ } \
105 else \
106 { \
107 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
108 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
109 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
110 a_BadExpr; \
111 } \
112 } \
113 } while (0)
114#else
115# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
116# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
117# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
118#endif
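/* Illustrative sketch (not from the original source): how a ring-0 entry point
 * might use the SMAP checks defined above. The function name vmmR0SmapUsageSketch
 * is hypothetical; the macros and the VERR_VMM_SMAP_BUT_AC_CLEAR status are taken
 * from this file. */
#if 0 /* example only */
static int vmmR0SmapUsageSketch(PVM pVM)
{
    VMM_CHECK_SMAP_SETUP();                                   /* snapshot SUPR0GetKernelFeatures() once per call */
    VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);  /* fail fast if EFLAGS.AC is clear while SMAP is on */

    /* ... work that may touch ring-3 mapped memory ... */

    VMM_CHECK_SMAP_CHECK2(pVM, return VERR_VMM_SMAP_BUT_AC_CLEAR); /* also records the failure in the VM assertion buffer */
    return VINF_SUCCESS;
}
#endif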
119
120
121/*********************************************************************************************************************************
122* Internal Functions *
123*********************************************************************************************************************************/
124RT_C_DECLS_BEGIN
125#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
126extern uint64_t __udivdi3(uint64_t, uint64_t);
127extern uint64_t __umoddi3(uint64_t, uint64_t);
128#endif
129RT_C_DECLS_END
130
131
132/*********************************************************************************************************************************
133* Global Variables *
134*********************************************************************************************************************************/
135/** Drag in necessary library bits.
136 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
137PFNRT g_VMMR0Deps[] =
138{
139 (PFNRT)RTCrc32,
140 (PFNRT)RTOnce,
141#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
142 (PFNRT)__udivdi3,
143 (PFNRT)__umoddi3,
144#endif
145 NULL
146};
147
148#ifdef RT_OS_SOLARIS
149/* Dependency information for the native Solaris loader. */
150extern "C" { char _depends_on[] = "vboxdrv"; }
151#endif
152
153
154
155/**
156 * Initialize the module.
157 * This is called when we're first loaded.
158 *
159 * @returns 0 on success.
160 * @returns VBox status code on failure.
161 * @param hMod Image handle for use in APIs.
162 */
163DECLEXPORT(int) ModuleInit(void *hMod)
164{
165 VMM_CHECK_SMAP_SETUP();
166 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
167
168#ifdef VBOX_WITH_DTRACE_R0
169 /*
170 * The first thing to do is register the static tracepoints.
171 * (Deregistration is automatic.)
172 */
173 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
174 if (RT_FAILURE(rc2))
175 return rc2;
176#endif
177 LogFlow(("ModuleInit:\n"));
178
179#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
180 /*
181 * Display the CMOS debug code.
182 */
183 ASMOutU8(0x72, 0x03);
184 uint8_t bDebugCode = ASMInU8(0x73);
185 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
186 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
187#endif
188
189 /*
190 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
191 */
192 int rc = vmmInitFormatTypes();
193 if (RT_SUCCESS(rc))
194 {
195 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
196 rc = GVMMR0Init();
197 if (RT_SUCCESS(rc))
198 {
199 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
200 rc = GMMR0Init();
201 if (RT_SUCCESS(rc))
202 {
203 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
204 rc = HMR0Init();
205 if (RT_SUCCESS(rc))
206 {
207 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
208 rc = PGMRegisterStringFormatTypes();
209 if (RT_SUCCESS(rc))
210 {
211 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
212#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
213 rc = PGMR0DynMapInit();
214#endif
215 if (RT_SUCCESS(rc))
216 {
217 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
218 rc = IntNetR0Init();
219 if (RT_SUCCESS(rc))
220 {
221#ifdef VBOX_WITH_PCI_PASSTHROUGH
222 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
223 rc = PciRawR0Init();
224#endif
225 if (RT_SUCCESS(rc))
226 {
227 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
228 rc = CPUMR0ModuleInit();
229 if (RT_SUCCESS(rc))
230 {
231#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
232 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
233 rc = vmmR0TripleFaultHackInit();
234 if (RT_SUCCESS(rc))
235#endif
236 {
237 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
238 if (RT_SUCCESS(rc))
239 {
240 LogFlow(("ModuleInit: returns success.\n"));
241 return VINF_SUCCESS;
242 }
243 }
244
245 /*
246 * Bail out.
247 */
248#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
249 vmmR0TripleFaultHackTerm();
250#endif
251 }
252 else
253 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
254#ifdef VBOX_WITH_PCI_PASSTHROUGH
255 PciRawR0Term();
256#endif
257 }
258 else
259 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
260 IntNetR0Term();
261 }
262 else
263 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
264#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
265 PGMR0DynMapTerm();
266#endif
267 }
268 else
269 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
270 PGMDeregisterStringFormatTypes();
271 }
272 else
273 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
274 HMR0Term();
275 }
276 else
277 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
278 GMMR0Term();
279 }
280 else
281 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
282 GVMMR0Term();
283 }
284 else
285 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
286 vmmTermFormatTypes();
287 }
288 else
289 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
290
291 LogFlow(("ModuleInit: failed %Rrc\n", rc));
292 return rc;
293}
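/* Illustrative sketch (not from the original source): the nested init/unwind idiom
 * ModuleInit uses above, reduced to two hypothetical subsystems A and B. Everything
 * that succeeded is torn down in reverse order before the failure status is
 * propagated. A_Init/A_Term/B_Init are placeholders, not real VMM APIs. */
#if 0 /* example only */
static int exampleNestedInit(void)
{
    int rc = A_Init();
    if (RT_SUCCESS(rc))
    {
        rc = B_Init();
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;    /* everything is up */
        /* B failed: undo A before bailing out. */
        A_Term();
    }
    return rc;                      /* the first failure wins */
}
#endif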
294
295
296/**
297 * Terminate the module.
298 * This is called when we're finally unloaded.
299 *
300 * @param hMod Image handle for use in APIs.
301 */
302DECLEXPORT(void) ModuleTerm(void *hMod)
303{
304 NOREF(hMod);
305 LogFlow(("ModuleTerm:\n"));
306
307 /*
308 * Terminate the CPUM module (Local APIC cleanup).
309 */
310 CPUMR0ModuleTerm();
311
312 /*
313 * Terminate the internal network service.
314 */
315 IntNetR0Term();
316
317 /*
318 * PGM (Darwin), HM and PciRaw global cleanup.
319 */
320#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
321 PGMR0DynMapTerm();
322#endif
323#ifdef VBOX_WITH_PCI_PASSTHROUGH
324 PciRawR0Term();
325#endif
326 PGMDeregisterStringFormatTypes();
327 HMR0Term();
328#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
329 vmmR0TripleFaultHackTerm();
330#endif
331
332 /*
333 * Destroy the GMM and GVMM instances.
334 */
335 GMMR0Term();
336 GVMMR0Term();
337
338 vmmTermFormatTypes();
339
340 LogFlow(("ModuleTerm: returns\n"));
341}
342
343
344/**
345 * Initiates the R0 driver for a particular VM instance.
346 *
347 * @returns VBox status code.
348 *
349 * @param pVM Pointer to the VM.
350 * @param uSvnRev The SVN revision of the ring-3 part.
351 * @param uBuildType Build type indicator.
352 * @thread EMT.
353 */
354static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
355{
356 VMM_CHECK_SMAP_SETUP();
357 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
358
359 /*
360 * Match the SVN revisions and build type.
361 */
362 if (uSvnRev != VMMGetSvnRev())
363 {
364 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
365 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
366 return VERR_VMM_R0_VERSION_MISMATCH;
367 }
368 if (uBuildType != vmmGetBuildType())
369 {
370 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
371 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
372 return VERR_VMM_R0_VERSION_MISMATCH;
373 }
374 if ( !VALID_PTR(pVM)
375 || pVM->pVMR0 != pVM)
376 return VERR_INVALID_PARAMETER;
377
378
379#ifdef LOG_ENABLED
380 /*
381 * Register the EMT R0 logger instance for VCPU 0.
382 */
383 PVMCPU pVCpu = &pVM->aCpus[0];
384
385 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
386 if (pR0Logger)
387 {
388# if 0 /* testing of the logger. */
389 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
390 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
391 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
392 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
393
394 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
395 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
396 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
397 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
398
399 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
400 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
401 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
402 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
403
404 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
405 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
406 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
407 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
408 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
409 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
410
411 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
412 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
413
414 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
415 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
416 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
417# endif
418 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
419 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
420 pR0Logger->fRegistered = true;
421 }
422#endif /* LOG_ENABLED */
423
424 /*
425 * Check if the host supports high resolution timers or not.
426 */
427 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
428 && !RTTimerCanDoHighResolution())
429 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
430
431 /*
432 * Initialize the per VM data for GVMM and GMM.
433 */
434 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
435 int rc = GVMMR0InitVM(pVM);
436// if (RT_SUCCESS(rc))
437// rc = GMMR0InitPerVMData(pVM);
438 if (RT_SUCCESS(rc))
439 {
440 /*
441 * Init HM, CPUM and PGM (Darwin only).
442 */
443 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
444 rc = HMR0InitVM(pVM);
445 if (RT_SUCCESS(rc))
446 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
447 if (RT_SUCCESS(rc))
448 {
449 rc = CPUMR0InitVM(pVM);
450 if (RT_SUCCESS(rc))
451 {
452 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
453#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
454 rc = PGMR0DynMapInitVM(pVM);
455#endif
456 if (RT_SUCCESS(rc))
457 {
458 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
459#ifdef VBOX_WITH_PCI_PASSTHROUGH
460 rc = PciRawR0InitVM(pVM);
461#endif
462 if (RT_SUCCESS(rc))
463 {
464 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
465 rc = GIMR0InitVM(pVM);
466 if (RT_SUCCESS(rc))
467 {
468 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
469 if (RT_SUCCESS(rc))
470 {
471 GVMMR0DoneInitVM(pVM);
472 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
473 return rc;
474 }
475
476 /* bail out */
477 GIMR0TermVM(pVM);
478 }
479#ifdef VBOX_WITH_PCI_PASSTHROUGH
480 PciRawR0TermVM(pVM);
481#endif
482 }
483 }
484 }
485 HMR0TermVM(pVM);
486 }
487 }
488
489 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
490 return rc;
491}
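/* Illustrative sketch (not from the original source): vmmR0InitVM is reached via
 * the VMMR0_DO_VMMR0_INIT operation with the ring-3 SVN revision packed into the
 * low 32 bits of u64Arg and the build type into the high 32 bits; the dispatcher
 * further down unpacks them with RT_LODWORD/RT_HIDWORD. The packing below is a
 * simplified sketch of what the ring-3 side would do. */
#if 0 /* example only */
    uint64_t u64Arg = RT_MAKE_U64(VMMGetSvnRev() /*Lo*/, vmmGetBuildType() /*Hi*/);
    int rc = vmmR0InitVM(pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
#endif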
492
493
494/**
495 * Terminates the R0 bits for a particular VM instance.
496 *
497 * This is normally called by ring-3 as part of the VM termination process, but
498 * may alternatively be called during the support driver session cleanup when
499 * the VM object is destroyed (see GVMM).
500 *
501 * @returns VBox status code.
502 *
503 * @param pVM Pointer to the VM.
504 * @param pGVM Pointer to the global VM structure. Optional.
505 * @thread EMT or session cleanup thread.
506 */
507VMMR0_INT_DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
508{
509#ifdef VBOX_WITH_PCI_PASSTHROUGH
510 PciRawR0TermVM(pVM);
511#endif
512
513 /*
514 * Tell GVMM what we're up to and check that we only do this once.
515 */
516 if (GVMMR0DoingTermVM(pVM, pGVM))
517 {
518 GIMR0TermVM(pVM);
519
520 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
521 * here to make sure we don't leak any shared pages if we crash... */
522#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
523 PGMR0DynMapTermVM(pVM);
524#endif
525 HMR0TermVM(pVM);
526 }
527
528 /*
529 * Deregister the logger.
530 */
531 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
532 return VINF_SUCCESS;
533}
534
535
536/**
537 * VMM ring-0 thread-context callback.
538 *
539 * This does common HM state updating and calls the HM-specific thread-context
540 * callback.
541 *
542 * @param enmEvent The thread-context event.
543 * @param pvUser Opaque pointer to the VMCPU.
544 *
545 * @thread EMT(pvUser)
546 */
547static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
548{
549 PVMCPU pVCpu = (PVMCPU)pvUser;
550
551 switch (enmEvent)
552 {
553 case RTTHREADCTXEVENT_IN:
554 {
555 /*
556 * Linux may call us with preemption enabled (really!) but technically we
557 * cannot get preempted here, otherwise we end up in an infinite recursion
558 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
559 * ad infinitum). Let's just disable preemption for now...
560 */
561 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
562 * preemption after doing the callout (one or two functions up the
563 * call chain). */
564 /** @todo r=ramshankar: See @bugref{5313#c30}. */
565 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
566 RTThreadPreemptDisable(&ParanoidPreemptState);
567
568 /* We need to update the VCPU <-> host CPU mapping. */
569 RTCPUID idHostCpu;
570 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
571 pVCpu->iHostCpuSet = iHostCpuSet;
572 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
573
574 /* In the very unlikely event that the GIP delta for the CPU we're being
575 rescheduled onto still needs calculating, try to force a return to ring-3.
576 We unfortunately cannot do the measurements right here. */
577 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
578 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
579
580 /* Invoke the HM-specific thread-context callback. */
581 HMR0ThreadCtxCallback(enmEvent, pvUser);
582
583 /* Restore preemption. */
584 RTThreadPreemptRestore(&ParanoidPreemptState);
585 break;
586 }
587
588 case RTTHREADCTXEVENT_OUT:
589 {
590 /* Invoke the HM-specific thread-context callback. */
591 HMR0ThreadCtxCallback(enmEvent, pvUser);
592
593 /*
594 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
595 * have the same host CPU associated with them.
596 */
597 pVCpu->iHostCpuSet = UINT32_MAX;
598 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
599 break;
600 }
601
602 default:
603 /* Invoke the HM-specific thread-context callback. */
604 HMR0ThreadCtxCallback(enmEvent, pvUser);
605 break;
606 }
607}
608
609
610/**
611 * Creates thread switching hook for the current EMT thread.
612 *
613 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
614 * platform does not implement switcher hooks, no hooks will be created and the
615 * member is set to NIL_RTTHREADCTXHOOK.
616 *
617 * @returns VBox status code.
618 * @param pVCpu Pointer to the cross context CPU structure.
619 * @thread EMT(pVCpu)
620 */
621VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
622{
623 VMCPU_ASSERT_EMT(pVCpu);
624 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
625
626 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
627 if (RT_SUCCESS(rc))
628 return rc;
629
630 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
631 if (rc == VERR_NOT_SUPPORTED)
632 return VINF_SUCCESS;
633
634 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
635 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
636}
637
638
639/**
640 * Destroys the thread switching hook for the specified VCPU.
641 *
642 * @param pVCpu Pointer to the cross context CPU structure.
643 * @remarks Can be called from any thread.
644 */
645VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
646{
647 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
648 AssertRC(rc);
649}
650
651
652/**
653 * Disables the thread switching hook for this VCPU (if we got one).
654 *
655 * @param pVCpu Pointer to the cross context CPU structure.
656 * @thread EMT(pVCpu)
657 *
658 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
659 * this call. This means you have to be careful with what you do!
660 */
661VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
662{
663 /*
664 * Clear the VCPU <-> host CPU mapping as we've left HM context.
665 * @bugref{7726#c19} explains the need for this trick:
666 *
667 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
668 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disable context hooks during
669 * longjmp & normal return to ring-3, which opens a window where we may be
670 * rescheduled without changing VMCPU::idHostCpu, causing confusion if
671 * the CPU starts executing a different EMT. Both functions first disable
672 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
673 * an opening for getting preempted.
674 */
675 /** @todo Make HM not need this API! Then we could leave the hooks enabled
676 * all the time. */
677 /** @todo move this into the context hook disabling if(). */
678 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
679
680 /*
681 * Disable the context hook, if we got one.
682 */
683 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
684 {
685 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
686 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
687 AssertRC(rc);
688 }
689}
690
691
692/**
693 * Internal version of VMMR0ThreadCtxHookIsEnabled.
694 *
695 * @returns true if enabled, false otherwise.
696 * @param pVCpu Pointer to the VMCPU.
697 */
698DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
699{
700 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
701}
702
703
704/**
705 * Whether the thread-context hook is enabled for this VCPU.
706 *
707 * @returns true if enabled, false otherwise.
708 * @param pVCpu Pointer to the VMCPU.
709 */
710VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
711{
712 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
713}
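/* Illustrative sketch (not from the original source): the thread-context hook
 * lifecycle implemented by the functions above. The control flow is simplified;
 * in the real code creation happens via GVMMR0CreateVM / GVMMR0RegisterVCpu and
 * enabling/disabling happens around guest execution in VMMR0EntryFast. */
#if 0 /* example only */
    /* EMT setup: create the hook (failure, e.g. VERR_NOT_SUPPORTED, is tolerated). */
    VMMR0ThreadCtxHookCreateForEmt(pVCpu);

    /* Around guest execution: enable while preemption is disabled, run, disable. */
    if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
        RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook);
    /* ... execute guest code; vmmR0ThreadCtxCallback keeps idHostCpu in sync ... */
    VMMR0ThreadCtxHookDisable(pVCpu);          /* also clears VMCPU::idHostCpu */

    /* Teardown. */
    VMMR0ThreadCtxHookDestroyForEmt(pVCpu);
#endif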
714
715
716#ifdef VBOX_WITH_STATISTICS
717/**
718 * Records return code statistics.
719 * @param pVM Pointer to the VM.
720 * @param pVCpu Pointer to the VMCPU.
721 * @param rc The status code.
722 */
723static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
724{
725 /*
726 * Collect statistics.
727 */
728 switch (rc)
729 {
730 case VINF_SUCCESS:
731 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
732 break;
733 case VINF_EM_RAW_INTERRUPT:
734 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
735 break;
736 case VINF_EM_RAW_INTERRUPT_HYPER:
737 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
738 break;
739 case VINF_EM_RAW_GUEST_TRAP:
740 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
741 break;
742 case VINF_EM_RAW_RING_SWITCH:
743 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
744 break;
745 case VINF_EM_RAW_RING_SWITCH_INT:
746 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
747 break;
748 case VINF_EM_RAW_STALE_SELECTOR:
749 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
750 break;
751 case VINF_EM_RAW_IRET_TRAP:
752 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
753 break;
754 case VINF_IOM_R3_IOPORT_READ:
755 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
756 break;
757 case VINF_IOM_R3_IOPORT_WRITE:
758 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
759 break;
760 case VINF_IOM_R3_MMIO_READ:
761 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
762 break;
763 case VINF_IOM_R3_MMIO_WRITE:
764 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
765 break;
766 case VINF_IOM_R3_MMIO_READ_WRITE:
767 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
768 break;
769 case VINF_PATM_HC_MMIO_PATCH_READ:
770 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
771 break;
772 case VINF_PATM_HC_MMIO_PATCH_WRITE:
773 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
774 break;
775 case VINF_CPUM_R3_MSR_READ:
776 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
777 break;
778 case VINF_CPUM_R3_MSR_WRITE:
779 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
780 break;
781 case VINF_EM_RAW_EMULATE_INSTR:
782 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
783 break;
784 case VINF_EM_RAW_EMULATE_IO_BLOCK:
785 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
786 break;
787 case VINF_PATCH_EMULATE_INSTR:
788 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
789 break;
790 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
791 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
792 break;
793 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
794 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
795 break;
796 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
797 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
798 break;
799 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
800 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
801 break;
802 case VINF_CSAM_PENDING_ACTION:
803 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
804 break;
805 case VINF_PGM_SYNC_CR3:
806 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
807 break;
808 case VINF_PATM_PATCH_INT3:
809 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
810 break;
811 case VINF_PATM_PATCH_TRAP_PF:
812 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
813 break;
814 case VINF_PATM_PATCH_TRAP_GP:
815 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
816 break;
817 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
818 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
819 break;
820 case VINF_EM_RESCHEDULE_REM:
821 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
822 break;
823 case VINF_EM_RAW_TO_R3:
824 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
825 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
826 else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
827 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
828 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
829 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
830 else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
831 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
832 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
833 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
834 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
835 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
836 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
837 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
838 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
839 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
840 else
841 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
842 break;
843
844 case VINF_EM_RAW_TIMER_PENDING:
845 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
846 break;
847 case VINF_EM_RAW_INTERRUPT_PENDING:
848 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
849 break;
850 case VINF_VMM_CALL_HOST:
851 switch (pVCpu->vmm.s.enmCallRing3Operation)
852 {
853 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
854 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
855 break;
856 case VMMCALLRING3_PDM_LOCK:
857 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
858 break;
859 case VMMCALLRING3_PGM_POOL_GROW:
860 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
861 break;
862 case VMMCALLRING3_PGM_LOCK:
863 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
864 break;
865 case VMMCALLRING3_PGM_MAP_CHUNK:
866 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
867 break;
868 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
869 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
870 break;
871 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
872 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
873 break;
874 case VMMCALLRING3_VMM_LOGGER_FLUSH:
875 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
876 break;
877 case VMMCALLRING3_VM_SET_ERROR:
878 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
879 break;
880 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
881 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
882 break;
883 case VMMCALLRING3_VM_R0_ASSERTION:
884 default:
885 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
886 break;
887 }
888 break;
889 case VINF_PATM_DUPLICATE_FUNCTION:
890 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
891 break;
892 case VINF_PGM_CHANGE_MODE:
893 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
894 break;
895 case VINF_PGM_POOL_FLUSH_PENDING:
896 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
897 break;
898 case VINF_EM_PENDING_REQUEST:
899 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
900 break;
901 case VINF_EM_HM_PATCH_TPR_INSTR:
902 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
903 break;
904 default:
905 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
906 break;
907 }
908}
909#endif /* VBOX_WITH_STATISTICS */
910
911
912/**
913 * The Ring 0 entry point, called by the fast-ioctl path.
914 *
915 * @param pVM Pointer to the VM.
916 * The return code is stored in pVM->aCpus[idCpu].vmm.s.iLastGZRc.
917 * @param idCpu The Virtual CPU ID of the calling EMT.
918 * @param enmOperation Which operation to execute.
919 * @remarks Assume called with interrupts _enabled_.
920 */
921VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
922{
923 /*
924 * Validation.
925 */
926 if (RT_UNLIKELY(idCpu >= pVM->cCpus))
927 return;
928 PVMCPU pVCpu = &pVM->aCpus[idCpu];
929 if (RT_UNLIKELY(pVCpu->hNativeThreadR0 != RTThreadNativeSelf()))
930 return;
931 VMM_CHECK_SMAP_SETUP();
932 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
933
934 /*
935 * Perform requested operation.
936 */
937 switch (enmOperation)
938 {
939 /*
940 * Switch to GC and run guest raw mode code.
941 * Disable interrupts before doing the world switch.
942 */
943 case VMMR0_DO_RAW_RUN:
944 {
945#ifdef VBOX_WITH_RAW_MODE
946# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
947 /* Some safety precautions first. */
948 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
949 {
950 pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
951 break;
952 }
953# endif
954
955 /*
956 * Disable preemption.
957 */
958 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
959 RTThreadPreemptDisable(&PreemptState);
960
961 /*
962 * Get the host CPU identifiers, make sure they are valid and that
963 * we've got a TSC delta for the CPU.
964 */
965 RTCPUID idHostCpu;
966 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
967 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
968 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
969 {
970 /*
971 * Commit the CPU identifiers and update the periodic preemption timer if it's active.
972 */
973# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
974 CPUMR0SetLApic(pVCpu, iHostCpuSet);
975# endif
976 pVCpu->iHostCpuSet = iHostCpuSet;
977 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
978
979 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
980 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
981
982 /*
983 * We might need to disable VT-x if the active switcher turns off paging.
984 */
985 bool fVTxDisabled;
986 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
987 if (RT_SUCCESS(rc))
988 {
989 /*
990 * Disable interrupts and run raw-mode code. The loop is for efficiently
991 * dispatching tracepoints that fired in raw-mode context.
992 */
993 RTCCUINTREG uFlags = ASMIntDisableFlags();
994
995 for (;;)
996 {
997 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
998 TMNotifyStartOfExecution(pVCpu);
999
1000 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1001 pVCpu->vmm.s.iLastGZRc = rc;
1002
1003 TMNotifyEndOfExecution(pVCpu);
1004 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1005
1006 if (rc != VINF_VMM_CALL_TRACER)
1007 break;
1008 SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
1009 }
1010
1011 /*
1012 * Re-enable VT-x before we dispatch any pending host interrupts and
1013 * before re-enabling interrupts.
1014 */
1015 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1016
1017 if ( rc == VINF_EM_RAW_INTERRUPT
1018 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1019 TRPMR0DispatchHostInterrupt(pVM);
1020
1021 ASMSetFlags(uFlags);
1022
1023 /* Fire dtrace probe and collect statistics. */
1024 VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1025# ifdef VBOX_WITH_STATISTICS
1026 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1027 vmmR0RecordRC(pVM, pVCpu, rc);
1028# endif
1029 }
1030 else
1031 pVCpu->vmm.s.iLastGZRc = rc;
1032
1033 /*
1034 * Invalidate the host CPU identifiers as we restore preemption.
1035 */
1036 pVCpu->iHostCpuSet = UINT32_MAX;
1037 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1038
1039 RTThreadPreemptRestore(&PreemptState);
1040 }
1041 /*
1042 * Invalid CPU set index or TSC delta in need of measuring.
1043 */
1044 else
1045 {
1046 RTThreadPreemptRestore(&PreemptState);
1047 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1048 {
1049 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1050 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1051 0 /*default cTries*/);
1052 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1053 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1054 else
1055 pVCpu->vmm.s.iLastGZRc = rc;
1056 }
1057 else
1058 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1059 }
1060
1061#else /* !VBOX_WITH_RAW_MODE */
1062 pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_NOT_SUPPORTED;
1063#endif
1064 break;
1065 }
1066
1067 /*
1068 * Run guest code using the available hardware acceleration technology.
1069 */
1070 case VMMR0_DO_HM_RUN:
1071 {
1072 /*
1073 * Disable preemption.
1074 */
1075 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1076 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1077 RTThreadPreemptDisable(&PreemptState);
1078
1079 /*
1080 * Get the host CPU identifiers, make sure they are valid and that
1081 * we've got a TSC delta for the CPU.
1082 */
1083 RTCPUID idHostCpu;
1084 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1085 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1086 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1087 {
1088 pVCpu->iHostCpuSet = iHostCpuSet;
1089 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1090
1091 /*
1092 * Update the periodic preemption timer if it's active.
1093 */
1094 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1095 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1096 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1097
1098#ifdef LOG_ENABLED
1099 /*
1100 * Ugly: Lazy registration of ring 0 loggers.
1101 */
1102 if (pVCpu->idCpu > 0)
1103 {
1104 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
1105 if ( pR0Logger
1106 && RT_UNLIKELY(!pR0Logger->fRegistered))
1107 {
1108 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
1109 pR0Logger->fRegistered = true;
1110 }
1111 }
1112#endif
1113
1114 int rc;
1115 bool fPreemptRestored = false;
1116 if (!HMR0SuspendPending())
1117 {
1118 /*
1119 * Enable the context switching hook.
1120 */
1121 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1122 {
1123 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1124 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1125 }
1126
1127 /*
1128 * Enter HM context.
1129 */
1130 rc = HMR0Enter(pVM, pVCpu);
1131 if (RT_SUCCESS(rc))
1132 {
1133 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1134
1135 /*
1136 * When preemption hooks are in place, enable preemption now that
1137 * we're in HM context.
1138 */
1139 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1140 {
1141 fPreemptRestored = true;
1142 RTThreadPreemptRestore(&PreemptState);
1143 }
1144
1145 /*
1146 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1147 */
1148 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1149 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1150 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1151
1152 /*
1153 * Assert sanity on the way out. Using manual assertion code here as normal
1154 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1155 */
1156 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1157 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1158 {
1159 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1160 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1161 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1162 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1163 }
1164 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1165 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1166 {
1167 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1168 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1169 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1170 rc = VERR_INVALID_STATE;
1171 }
1172
1173 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1174 }
1175 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1176
1177 /*
1178 * Invalidate the host CPU identifiers before we disable the context
1179 * hook / restore preemption.
1180 */
1181 pVCpu->iHostCpuSet = UINT32_MAX;
1182 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1183
1184 /*
1185 * Disable context hooks. Due to unresolved cleanup issues, we
1186 * cannot leave the hooks enabled when we return to ring-3.
1187 *
1188 * Note! At the moment HM may also have disabled the hook
1189 * when we get here, but the IPRT API handles that.
1190 */
1191 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1192 {
1193 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1194 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1195 }
1196 }
1197 /*
1198 * The system is about to go into suspend mode; go back to ring 3.
1199 */
1200 else
1201 {
1202 rc = VINF_EM_RAW_INTERRUPT;
1203 pVCpu->iHostCpuSet = UINT32_MAX;
1204 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1205 }
1206
1207 /** @todo When HM stops messing with the context hook state, we'll disable
1208 * preemption again before the RTThreadCtxHookDisable call. */
1209 if (!fPreemptRestored)
1210 RTThreadPreemptRestore(&PreemptState);
1211
1212 pVCpu->vmm.s.iLastGZRc = rc;
1213
1214 /* Fire dtrace probe and collect statistics. */
1215 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1216#ifdef VBOX_WITH_STATISTICS
1217 vmmR0RecordRC(pVM, pVCpu, rc);
1218#endif
1219 }
1220 /*
1221 * Invalid CPU set index or TSC delta in need of measuring.
1222 */
1223 else
1224 {
1225 pVCpu->iHostCpuSet = UINT32_MAX;
1226 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1227 RTThreadPreemptRestore(&PreemptState);
1228 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1229 {
1230 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1231 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1232 0 /*default cTries*/);
1233 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1234 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1235 else
1236 pVCpu->vmm.s.iLastGZRc = rc;
1237 }
1238 else
1239 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1240 }
1241 break;
1242 }
1243
1244 /*
1245 * For profiling.
1246 */
1247 case VMMR0_DO_NOP:
1248 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1249 break;
1250
1251 /*
1252 * Impossible.
1253 */
1254 default:
1255 AssertMsgFailed(("%#x\n", enmOperation));
1256 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1257 break;
1258 }
1259 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1260}
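/* Illustrative sketch (not from the original source): VMMR0EntryFast returns no
 * status code directly; the calling EMT reads the result back from its VMCPU
 * after the fast ioctl. The surrounding dispatch logic is simplified. */
#if 0 /* example only */
    VMMR0EntryFast(pVM, idCpu, VMMR0_DO_HM_RUN);
    int rcRun = pVM->aCpus[idCpu].vmm.s.iLastGZRc;  /* status from ring-0 execution */
    if (rcRun == VINF_EM_RAW_TO_R3)
    {
        /* ... handle pending ring-3 work (forced actions, timers, etc.) ... */
    }
#endif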
1261
1262
1263/**
1264 * Validates a session or VM session argument.
1265 *
1266 * @returns true / false accordingly.
1267 * @param pVM Pointer to the VM.
1268 * @param pClaimedSession The session the request claims to belong to. Optional.
 * @param pSession The session of the caller.
1269 */
1270DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1271{
1272 /* This must be set! */
1273 if (!pSession)
1274 return false;
1275
1276 /* Only one out of the two. */
1277 if (pVM && pClaimedSession)
1278 return false;
1279 if (pVM)
1280 pClaimedSession = pVM->pSession;
1281 return pClaimedSession == pSession;
1282}
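/* Illustrative sketch (not from the original source): the contract of
 * vmmR0IsValidSession. At most one of pVM and pClaimedSession may be given, and
 * whichever session that yields must match the caller's support driver session. */
#if 0 /* example only */
    /* VM-bound check: pVM->pSession must equal the caller's session. */
    bool fVmOk      = vmmR0IsValidSession(pVM, NULL /*pClaimedSession*/, pSession);
    /* Session-only check: the claimed session must equal the caller's session. */
    bool fSessionOk = vmmR0IsValidSession(NULL /*pVM*/, pClaimedSession, pSession);
#endif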
1283
1284
1285/**
1286 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1287 * called through a longjmp so we can exit safely on failure.
1288 *
1289 * @returns VBox status code.
1290 * @param pVM Pointer to the VM.
1291 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1292 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1293 * @param enmOperation Which operation to execute.
1294 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1295 * The support driver validates this if it's present.
1296 * @param u64Arg Some simple constant argument.
1297 * @param pSession The session of the caller.
1298 * @remarks Assume called with interrupts _enabled_.
1299 */
1300static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1301{
1302 /*
1303 * Common VM pointer validation.
1304 */
1305 if (pVM)
1306 {
1307 if (RT_UNLIKELY( !VALID_PTR(pVM)
1308 || ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
1309 {
1310 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
1311 return VERR_INVALID_POINTER;
1312 }
1313 if (RT_UNLIKELY( pVM->enmVMState < VMSTATE_CREATING
1314 || pVM->enmVMState > VMSTATE_TERMINATED
1315 || pVM->pVMR0 != pVM))
1316 {
1317 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
1318 pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
1319 return VERR_INVALID_POINTER;
1320 }
1321
1322 if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
1323 {
1324 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
1325 return VERR_INVALID_PARAMETER;
1326 }
1327 }
1328 else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
1329 {
1330 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1331 return VERR_INVALID_PARAMETER;
1332 }
1333 VMM_CHECK_SMAP_SETUP();
1334 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1335 int rc;
1336
1337 switch (enmOperation)
1338 {
1339 /*
1340 * GVM requests
1341 */
1342 case VMMR0_DO_GVMM_CREATE_VM:
1343 if (pVM || u64Arg || idCpu != NIL_VMCPUID)
1344 return VERR_INVALID_PARAMETER;
1345 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);
1346 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1347 break;
1348
1349 case VMMR0_DO_GVMM_DESTROY_VM:
1350 if (pReqHdr || u64Arg)
1351 return VERR_INVALID_PARAMETER;
1352 rc = GVMMR0DestroyVM(pVM);
1353 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1354 break;
1355
1356 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1357 {
1358 if (!pVM)
1359 return VERR_INVALID_PARAMETER;
1360 rc = GVMMR0RegisterVCpu(pVM, idCpu);
1361 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1362 break;
1363 }
1364
1365 case VMMR0_DO_GVMM_SCHED_HALT:
1366 if (pReqHdr)
1367 return VERR_INVALID_PARAMETER;
1368 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1369 rc = GVMMR0SchedHalt(pVM, idCpu, u64Arg);
1370 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1371 break;
1372
1373 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1374 if (pReqHdr || u64Arg)
1375 return VERR_INVALID_PARAMETER;
1376 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1377 rc = GVMMR0SchedWakeUp(pVM, idCpu);
1378 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1379 break;
1380
1381 case VMMR0_DO_GVMM_SCHED_POKE:
1382 if (pReqHdr || u64Arg)
1383 return VERR_INVALID_PARAMETER;
1384 rc = GVMMR0SchedPoke(pVM, idCpu);
1385 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1386 break;
1387
1388 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1389 if (u64Arg)
1390 return VERR_INVALID_PARAMETER;
1391 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1392 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1393 break;
1394
1395 case VMMR0_DO_GVMM_SCHED_POLL:
1396 if (pReqHdr || u64Arg > 1)
1397 return VERR_INVALID_PARAMETER;
1398 rc = GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);
1399 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1400 break;
1401
1402 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1403 if (u64Arg)
1404 return VERR_INVALID_PARAMETER;
1405 rc = GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);
1406 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1407 break;
1408
1409 case VMMR0_DO_GVMM_RESET_STATISTICS:
1410 if (u64Arg)
1411 return VERR_INVALID_PARAMETER;
1412 rc = GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);
1413 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1414 break;
1415
1416 /*
1417 * Initialize the R0 part of a VM instance.
1418 */
1419 case VMMR0_DO_VMMR0_INIT:
1420 rc = vmmR0InitVM(pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1421 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1422 break;
1423
1424 /*
1425 * Terminate the R0 part of a VM instance.
1426 */
1427 case VMMR0_DO_VMMR0_TERM:
1428 rc = VMMR0TermVM(pVM, NULL);
1429 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1430 break;
1431
1432 /*
1433 * Attempt to enable HM mode and check the current setting.
1434 */
1435 case VMMR0_DO_HM_ENABLE:
1436 rc = HMR0EnableAllCpus(pVM);
1437 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1438 break;
1439
1440 /*
1441 * Setup the hardware accelerated session.
1442 */
1443 case VMMR0_DO_HM_SETUP_VM:
1444 rc = HMR0SetupVM(pVM);
1445 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1446 break;
1447
1448 /*
1449 * Switch to RC to execute Hypervisor function.
1450 */
1451 case VMMR0_DO_CALL_HYPERVISOR:
1452 {
1453#ifdef VBOX_WITH_RAW_MODE
1454 /*
1455 * Validate input / context.
1456 */
1457 if (RT_UNLIKELY(idCpu != 0))
1458 return VERR_INVALID_CPU_ID;
1459 if (RT_UNLIKELY(pVM->cCpus != 1))
1460 return VERR_INVALID_PARAMETER;
1461 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1462# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1463 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1464 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1465# endif
1466
1467 /*
1468 * Disable interrupts.
1469 */
1470 RTCCUINTREG fFlags = ASMIntDisableFlags();
1471
1472 /*
1473 * Get the host CPU identifiers, make sure they are valid and that
1474 * we've got a TSC delta for the CPU.
1475 */
1476 RTCPUID idHostCpu;
1477 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1478 if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
1479 {
1480 ASMSetFlags(fFlags);
1481 return VERR_INVALID_CPU_INDEX;
1482 }
1483 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1484 {
1485 ASMSetFlags(fFlags);
1486 rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1487 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1488 0 /*default cTries*/);
1489 if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
1490 {
1491 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1492 return rc;
1493 }
1494 }
1495
1496 /*
1497 * Commit the CPU identifiers.
1498 */
1499# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1500 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1501# endif
1502 pVCpu->iHostCpuSet = iHostCpuSet;
1503 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1504
1505 /*
1506 * We might need to disable VT-x if the active switcher turns off paging.
1507 */
1508 bool fVTxDisabled;
1509 rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1510 if (RT_SUCCESS(rc))
1511 {
1512 /*
1513 * Go through the wormhole...
1514 */
1515 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1516
1517 /*
1518 * Re-enable VT-x before we dispatch any pending host interrupts.
1519 */
1520 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1521
1522 if ( rc == VINF_EM_RAW_INTERRUPT
1523 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1524 TRPMR0DispatchHostInterrupt(pVM);
1525 }
1526
1527 /*
1528 * Invalidate the host CPU identifiers as we restore interrupts.
1529 */
1530 pVCpu->iHostCpuSet = UINT32_MAX;
1531 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1532 ASMSetFlags(fFlags);
1533
1534#else /* !VBOX_WITH_RAW_MODE */
1535 rc = VERR_RAW_MODE_NOT_SUPPORTED;
1536#endif
1537 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1538 break;
1539 }
1540
1541 /*
1542 * PGM wrappers.
1543 */
1544 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1545 if (idCpu == NIL_VMCPUID)
1546 return VERR_INVALID_CPU_ID;
1547 rc = PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);
1548 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1549 break;
1550
1551 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1552 if (idCpu == NIL_VMCPUID)
1553 return VERR_INVALID_CPU_ID;
1554 rc = PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu]);
1555 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1556 break;
1557
1558 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1559 if (idCpu == NIL_VMCPUID)
1560 return VERR_INVALID_CPU_ID;
1561 rc = PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);
1562 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1563 break;
1564
1565 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1566 if (idCpu != 0)
1567 return VERR_INVALID_CPU_ID;
1568 rc = PGMR0PhysSetupIommu(pVM);
1569 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1570 break;
1571
1572 /*
1573 * GMM wrappers.
1574 */
1575 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1576 if (u64Arg)
1577 return VERR_INVALID_PARAMETER;
1578 rc = GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1579 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1580 break;
1581
1582 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1583 if (u64Arg)
1584 return VERR_INVALID_PARAMETER;
1585 rc = GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1586 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1587 break;
1588
1589 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1590 if (u64Arg)
1591 return VERR_INVALID_PARAMETER;
1592 rc = GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1593 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1594 break;
1595
1596 case VMMR0_DO_GMM_FREE_PAGES:
1597 if (u64Arg)
1598 return VERR_INVALID_PARAMETER;
1599 rc = GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1600 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1601 break;
1602
1603 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1604 if (u64Arg)
1605 return VERR_INVALID_PARAMETER;
1606 rc = GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1607 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1608 break;
1609
1610 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1611 if (u64Arg)
1612 return VERR_INVALID_PARAMETER;
1613 rc = GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);
1614 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1615 break;
1616
1617 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1618 if (idCpu == NIL_VMCPUID)
1619 return VERR_INVALID_CPU_ID;
1620 if (u64Arg)
1621 return VERR_INVALID_PARAMETER;
1622 rc = GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1623 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1624 break;
1625
1626 case VMMR0_DO_GMM_BALLOONED_PAGES:
1627 if (u64Arg)
1628 return VERR_INVALID_PARAMETER;
1629 rc = GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1630 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1631 break;
1632
1633 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1634 if (u64Arg)
1635 return VERR_INVALID_PARAMETER;
1636 rc = GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1637 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1638 break;
1639
1640 case VMMR0_DO_GMM_SEED_CHUNK:
1641 if (pReqHdr)
1642 return VERR_INVALID_PARAMETER;
1643 rc = GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);
1644 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1645 break;
1646
1647 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1648 if (idCpu == NIL_VMCPUID)
1649 return VERR_INVALID_CPU_ID;
1650 if (u64Arg)
1651 return VERR_INVALID_PARAMETER;
1652 rc = GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1653 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1654 break;
1655
1656 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1657 if (idCpu == NIL_VMCPUID)
1658 return VERR_INVALID_CPU_ID;
1659 if (u64Arg)
1660 return VERR_INVALID_PARAMETER;
1661 rc = GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1662 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1663 break;
1664
1665 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1666 if (idCpu == NIL_VMCPUID)
1667 return VERR_INVALID_CPU_ID;
1668 if ( u64Arg
1669 || pReqHdr)
1670 return VERR_INVALID_PARAMETER;
1671 rc = GMMR0ResetSharedModules(pVM, idCpu);
1672 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1673 break;
1674
1675#ifdef VBOX_WITH_PAGE_SHARING
1676 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1677 {
1678 if (idCpu == NIL_VMCPUID)
1679 return VERR_INVALID_CPU_ID;
1680 if ( u64Arg
1681 || pReqHdr)
1682 return VERR_INVALID_PARAMETER;
1683
1684 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1685 Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());
1686
1687# ifdef DEBUG_sandervl
1688 /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
1689 /* Todo: this can have bad side effects for unexpected jumps back to r3. */
1690 rc = GMMR0CheckSharedModulesStart(pVM);
1691 if (rc == VINF_SUCCESS)
1692 {
1693 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
1694 Assert( rc == VINF_SUCCESS
1695 || (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
1696 GMMR0CheckSharedModulesEnd(pVM);
1697 }
1698# else
1699 rc = GMMR0CheckSharedModules(pVM, pVCpu);
1700# endif
1701 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1702 break;
1703 }
1704#endif
1705
1706#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1707 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1708 if (u64Arg)
1709 return VERR_INVALID_PARAMETER;
1710 rc = GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1711 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1712 break;
1713#endif
1714
1715 case VMMR0_DO_GMM_QUERY_STATISTICS:
1716 if (u64Arg)
1717 return VERR_INVALID_PARAMETER;
1718 rc = GMMR0QueryStatisticsReq(pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1719 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1720 break;
1721
1722 case VMMR0_DO_GMM_RESET_STATISTICS:
1723 if (u64Arg)
1724 return VERR_INVALID_PARAMETER;
1725 rc = GMMR0ResetStatisticsReq(pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1726 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1727 break;
1728
1729 /*
1730 * A quick GCFGM mock-up.
1731 */
1732 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1733 case VMMR0_DO_GCFGM_SET_VALUE:
1734 case VMMR0_DO_GCFGM_QUERY_VALUE:
1735 {
1736 if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1737 return VERR_INVALID_PARAMETER;
1738 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1739 if (pReq->Hdr.cbReq != sizeof(*pReq))
1740 return VERR_INVALID_PARAMETER;
1741 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1742 {
1743 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1744 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1745 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1746 }
1747 else
1748 {
1749 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1750 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1751 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1752 }
1753 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1754 break;
1755 }
1756
1757 /*
1758 * PDM Wrappers.
1759 */
1760 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1761 {
1762 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1763 return VERR_INVALID_PARAMETER;
1764 rc = PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1765 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1766 break;
1767 }
1768
1769 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1770 {
1771 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1772 return VERR_INVALID_PARAMETER;
1773 rc = PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1774 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1775 break;
1776 }
1777
1778 /*
1779 * Requests to the internal networking service.
1780 */
1781 case VMMR0_DO_INTNET_OPEN:
1782 {
1783 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
1784 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
1785 return VERR_INVALID_PARAMETER;
1786 rc = IntNetR0OpenReq(pSession, pReq);
1787 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1788 break;
1789 }
1790
1791 case VMMR0_DO_INTNET_IF_CLOSE:
1792 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1793 return VERR_INVALID_PARAMETER;
1794 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
1795 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1796 break;
1797
1798
1799 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
1800 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1801 return VERR_INVALID_PARAMETER;
1802 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
1803 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1804 break;
1805
1806 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
1807 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1808 return VERR_INVALID_PARAMETER;
1809 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
1810 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1811 break;
1812
1813 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
1814 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1815 return VERR_INVALID_PARAMETER;
1816 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
1817 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1818 break;
1819
1820 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
1821 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1822 return VERR_INVALID_PARAMETER;
1823 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
1824 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1825 break;
1826
1827 case VMMR0_DO_INTNET_IF_SEND:
1828 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1829 return VERR_INVALID_PARAMETER;
1830 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
1831 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1832 break;
1833
1834 case VMMR0_DO_INTNET_IF_WAIT:
1835 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1836 return VERR_INVALID_PARAMETER;
1837 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
1838 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1839 break;
1840
1841 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
1842 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFABORTWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1843 return VERR_INVALID_PARAMETER;
1844 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
1845 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1846 break;
1847
1848#ifdef VBOX_WITH_PCI_PASSTHROUGH
1849 /*
1850 * Requests to host PCI driver service.
1851 */
1852 case VMMR0_DO_PCIRAW_REQ:
1853 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1854 return VERR_INVALID_PARAMETER;
1855 rc = PciRawR0ProcessReq(pSession, pVM, (PPCIRAWSENDREQ)pReqHdr);
1856 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1857 break;
1858#endif
1859 /*
1860 * For profiling.
1861 */
1862 case VMMR0_DO_NOP:
1863 case VMMR0_DO_SLOW_NOP:
1864 return VINF_SUCCESS;
1865
1866 /*
1867 * For testing Ring-0 APIs invoked in this environment.
1868 */
1869 case VMMR0_DO_TESTS:
1870 /** @todo make new test */
1871 return VINF_SUCCESS;
1872
1873
1874#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
1875 case VMMR0_DO_TEST_SWITCHER3264:
1876 if (idCpu == NIL_VMCPUID)
1877 return VERR_INVALID_CPU_ID;
1878 rc = HMR0TestSwitcher3264(pVM);
1879 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1880 break;
1881#endif
1882 default:
1883 /*
1884 * We're returning VERR_NOT_SUPPORTED here so that we've got something other
1885 * than -1, which the interrupt gate glue code might return.
1886 */
1887 Log(("operation %#x is not supported\n", enmOperation));
1888 return VERR_NOT_SUPPORTED;
1889 }
1890 return rc;
1891}
1892
1893
1894/**
1895 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
1896 */
1897typedef struct VMMR0ENTRYEXARGS
1898{
1899 PVM pVM;
1900 VMCPUID idCpu;
1901 VMMR0OPERATION enmOperation;
1902 PSUPVMMR0REQHDR pReq;
1903 uint64_t u64Arg;
1904 PSUPDRVSESSION pSession;
1905} VMMR0ENTRYEXARGS;
1906/** Pointer to a vmmR0EntryExWrapper argument package. */
1907typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
1908
1909/**
1910 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
1911 *
1912 * @returns VBox status code.
1913 * @param pvArgs The argument package.
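 *
 * Used together with vmmR0CallRing3SetJmpEx() in VMMR0EntryEx() below, so that
 * EMT requests run inside an armed long jump frame and assertions can bail out
 * to ring-3 without causing trouble.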
1914 */
1915static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
1916{
1917 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
1918 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
1919 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
1920 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
1921 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
1922 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
1923}
1924
1925
1926/**
1927 * The Ring 0 entry point, called by the support library (SUP).
1928 *
1929 * @returns VBox status code.
1930 * @param pVM Pointer to the VM.
1931 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1932 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1933 * @param enmOperation Which operation to execute.
1934 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
1935 * @param u64Arg Some simple constant argument.
1936 * @param pSession The session of the caller.
1937 * @remarks Assume called with interrupts _enabled_.
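 *
 * @par Example
 *     An illustrative sketch only (assuming the support library's
 *     SUPR3CallVMMR0Ex and the usual request-packet pattern; the operation and
 *     request variable are just samples, not prescribed by this function): a
 *     VM-level request without a per-CPU context could be submitted from
 *     ring-3 as
 * @code
 *     rc = SUPR3CallVMMR0Ex(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_GMM_QUERY_STATISTICS, 0, &Req.Hdr);
 * @endcode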
1938 */
1939VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
1940{
1941 /*
1942 * Requests that should only happen on the EMT thread will be
1943 * wrapped in a setjmp so we can assert without causing trouble.
1944 */
1945 if ( VALID_PTR(pVM)
1946 && pVM->pVMR0
1947 && idCpu < pVM->cCpus)
1948 {
1949 switch (enmOperation)
1950 {
1951 /* These might/will be called before VMMR3Init. */
1952 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1953 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1954 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1955 case VMMR0_DO_GMM_FREE_PAGES:
1956 case VMMR0_DO_GMM_BALLOONED_PAGES:
1957 /* On the Mac we might not have a valid jmp buf, so check these as well. */
1958 case VMMR0_DO_VMMR0_INIT:
1959 case VMMR0_DO_VMMR0_TERM:
1960 {
1961 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1962
1963 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
1964 break;
1965
1966 /** @todo validate this EMT claim... GVM knows. */
1967 VMMR0ENTRYEXARGS Args;
1968 Args.pVM = pVM;
1969 Args.idCpu = idCpu;
1970 Args.enmOperation = enmOperation;
1971 Args.pReq = pReq;
1972 Args.u64Arg = u64Arg;
1973 Args.pSession = pSession;
1974 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
1975 }
1976
1977 default:
1978 break;
1979 }
1980 }
1981 return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
1982}
1983
1984
1985/**
1986 * Checks whether we've armed the ring-0 long jump machinery.
1987 *
1988 * @returns @c true / @c false
1989 * @param pVCpu Pointer to the VMCPU.
1990 * @thread EMT
1991 * @sa VMMIsLongJumpArmed
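 *
 * Illustrative use (a sketch, not taken from this file): guard a ring-3 round
 * trip on the armed state:
 * @code
 *     if (VMMR0IsLongJumpArmed(pVCpu))
 *         rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
 * @endcode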
1992 */
1993VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
1994{
1995#ifdef RT_ARCH_X86
1996 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
1997 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
1998#else
1999 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2000 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2001#endif
2002}
2003
2004
2005/**
2006 * Checks whether we've done a ring-3 long jump.
2007 *
2008 * @returns @c true / @c false
2009 * @param pVCpu Pointer to the VMCPU.
2010 * @thread EMT
2011 */
2012VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
2013{
2014 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2015}
2016
2017
2018/**
2019 * Internal R0 logger worker: Flush logger.
2020 *
2021 * @param pLogger The logger instance to flush.
2022 * @remark This function must be exported!
2023 */
2024VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2025{
2026#ifdef LOG_ENABLED
2027 /*
2028 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2029 * (This code is deliberately paranoid.)
2030 */
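    /* Recover the owning VMMR0LOGGER from its embedded RTLOGGER member (container_of-style offset math). */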
2031 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2032 if ( !VALID_PTR(pR0Logger)
2033 || !VALID_PTR(pR0Logger + 1)
2034 || pLogger->u32Magic != RTLOGGER_MAGIC)
2035 {
2036# ifdef DEBUG
2037 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2038# endif
2039 return;
2040 }
2041 if (pR0Logger->fFlushingDisabled)
2042 return; /* quietly */
2043
2044 PVM pVM = pR0Logger->pVM;
2045 if ( !VALID_PTR(pVM)
2046 || pVM->pVMR0 != pVM)
2047 {
2048# ifdef DEBUG
2049 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
2050# endif
2051 return;
2052 }
2053
2054 PVMCPU pVCpu = VMMGetCpu(pVM);
2055 if (pVCpu)
2056 {
2057 /*
2058 * Check that the jump buffer is armed.
2059 */
2060# ifdef RT_ARCH_X86
2061 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2062 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2063# else
2064 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2065 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2066# endif
2067 {
2068# ifdef DEBUG
2069 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2070# endif
2071 return;
2072 }
2073 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2074 }
2075# ifdef DEBUG
2076 else
2077 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2078# endif
2079#else
2080 NOREF(pLogger);
2081#endif /* LOG_ENABLED */
2082}
2083
2084/**
2085 * Internal R0 logger worker: Custom prefix.
2086 *
2087 * @returns Number of chars written.
2088 *
2089 * @param pLogger The logger instance.
2090 * @param pchBuf The output buffer.
2091 * @param cchBuf The size of the buffer.
2092 * @param pvUser User argument (ignored).
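 *
 * The prefix is the virtual CPU id rendered as two lowercase hex digits; for
 * example, the logger registered for idCpu 0x2a prefixes every line with "2a".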
2093 */
2094VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
2095{
2096 NOREF(pvUser);
2097#ifdef LOG_ENABLED
2098 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2099 if ( !VALID_PTR(pR0Logger)
2100 || !VALID_PTR(pR0Logger + 1)
2101 || pLogger->u32Magic != RTLOGGER_MAGIC
2102 || cchBuf < 2)
2103 return 0;
2104
2105 static const char s_szHex[17] = "0123456789abcdef";
2106 VMCPUID const idCpu = pR0Logger->idCpu;
2107 pchBuf[1] = s_szHex[ idCpu & 15];
2108 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
2109
2110 return 2;
2111#else
2112 NOREF(pLogger); NOREF(pchBuf); NOREF(cchBuf);
2113 return 0;
2114#endif
2115}
2116
2117#ifdef LOG_ENABLED
2118
2119/**
2120 * Disables flushing of the ring-0 debug log.
2121 *
2122 * @param pVCpu Pointer to the VMCPU.
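 *
 * Meant to be paired with VMMR0LogFlushEnable(); a minimal sketch (not code
 * from this file):
 * @code
 *     VMMR0LogFlushDisable(pVCpu);
 *     // ... work that must not trigger a ring-3 logger flush ...
 *     VMMR0LogFlushEnable(pVCpu);
 * @endcode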
2123 */
2124VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
2125{
2126 if (pVCpu->vmm.s.pR0LoggerR0)
2127 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2128}
2129
2130
2131/**
2132 * Enables flushing of the ring-0 debug log.
2133 *
2134 * @param pVCpu Pointer to the VMCPU.
2135 */
2136VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
2137{
2138 if (pVCpu->vmm.s.pR0LoggerR0)
2139 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2140}
2141
2142
2143/**
2144 * Checks whether flushing of the ring-0 debug log is currently disabled.
2145 *
2146 * @returns true if flushing is disabled, false if it is enabled.
 * @param pVCpu Pointer to the VMCPU.
2147 */
2148VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
2149{
2150 if (pVCpu->vmm.s.pR0LoggerR0)
2151 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2152 return true;
2153}
2154#endif /* LOG_ENABLED */
2155
2156/**
2157 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2158 *
2159 * @returns true if the breakpoint should be hit, false if it should be ignored.
2160 */
2161DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2162{
2163#if 0
2164 return true;
2165#else
2166 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2167 if (pVM)
2168 {
2169 PVMCPU pVCpu = VMMGetCpu(pVM);
2170
2171 if (pVCpu)
2172 {
2173#ifdef RT_ARCH_X86
2174 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2175 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2176#else
2177 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2178 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2179#endif
2180 {
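                /* Hand the assertion to ring-3; only report "panic" here if that call fails. */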
2181 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2182 return RT_FAILURE_NP(rc);
2183 }
2184 }
2185 }
2186#ifdef RT_OS_LINUX
2187 return true;
2188#else
2189 return false;
2190#endif
2191#endif
2192}
2193
2194
2195/**
2196 * Override this so we can push it up to ring-3.
2197 *
2198 * @param pszExpr Expression. Can be NULL.
2199 * @param uLine Location line number.
2200 * @param pszFile Location file name.
2201 * @param pszFunction Location function name.
2202 */
2203DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2204{
2205 /*
2206 * To the log.
2207 */
2208 LogAlways(("\n!!R0-Assertion Failed!!\n"
2209 "Expression: %s\n"
2210 "Location : %s(%d) %s\n",
2211 pszExpr, pszFile, uLine, pszFunction));
2212
2213 /*
2214 * To the global VMM buffer.
2215 */
2216 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2217 if (pVM)
2218 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2219 "\n!!R0-Assertion Failed!!\n"
2220 "Expression: %s\n"
2221 "Location : %s(%d) %s\n",
2222 pszExpr, pszFile, uLine, pszFunction);
2223
2224 /*
2225 * Continue the normal way.
2226 */
2227 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2228}
2229
2230
2231/**
2232 * Callback for RTLogFormatV which writes to the ring-3 log port.
2233 * See PFNLOGOUTPUT() for details.
2234 */
2235static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2236{
2237 for (size_t i = 0; i < cbChars; i++)
2238 {
2239 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2240 }
2241
2242 NOREF(pv);
2243 return cbChars;
2244}
2245
2246
2247/**
2248 * Override this so we can push it up to ring-3.
2249 *
2250 * @param pszFormat The format string.
2251 * @param va Arguments.
2252 */
2253DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2254{
2255 va_list vaCopy;
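    /* va is needed up to four times below (two loggers, the VMM buffer, and RTAssertMsg2V), so each of the first three uses works on a va_copy. */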
2256
2257 /*
2258 * Push the message to the loggers.
2259 */
2260 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2261 if (pLog)
2262 {
2263 va_copy(vaCopy, va);
2264 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2265 va_end(vaCopy);
2266 }
2267 pLog = RTLogRelGetDefaultInstance();
2268 if (pLog)
2269 {
2270 va_copy(vaCopy, va);
2271 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2272 va_end(vaCopy);
2273 }
2274
2275 /*
2276 * Push it to the global VMM buffer.
2277 */
2278 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2279 if (pVM)
2280 {
2281 va_copy(vaCopy, va);
2282 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2283 va_end(vaCopy);
2284 }
2285
2286 /*
2287 * Continue the normal way.
2288 */
2289 RTAssertMsg2V(pszFormat, va);
2290}
2291