VirtualBox
source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp @ 69801
Last change on this file since 69801 was 69111, checked in by vboxsync, 7 years ago ((C) year)

1/* $Id: VMMR0.cpp 69111 2017-10-17 14:26:02Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/stam.h>
30#include <VBox/vmm/tm.h>
31#include "VMMInternal.h"
32#include <VBox/vmm/vm.h>
33#include <VBox/vmm/gvm.h>
34#ifdef VBOX_WITH_PCI_PASSTHROUGH
35# include <VBox/vmm/pdmpci.h>
36#endif
37#include <VBox/vmm/apic.h>
38
39#include <VBox/vmm/gvmm.h>
40#include <VBox/vmm/gmm.h>
41#include <VBox/vmm/gim.h>
42#include <VBox/intnet.h>
43#include <VBox/vmm/hm.h>
44#include <VBox/param.h>
45#include <VBox/err.h>
46#include <VBox/version.h>
47#include <VBox/log.h>
48
49#include <iprt/asm-amd64-x86.h>
50#include <iprt/assert.h>
51#include <iprt/crc.h>
52#include <iprt/mp.h>
53#include <iprt/once.h>
54#include <iprt/stdarg.h>
55#include <iprt/string.h>
56#include <iprt/thread.h>
57#include <iprt/timer.h>
58
59#include "dtrace/VBoxVMM.h"
60
61
62#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
63# pragma intrinsic(_AddressOfReturnAddress)
64#endif
65
66#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
67# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
68#endif
69
70
71
72/*********************************************************************************************************************************
73* Defined Constants And Macros *
74*********************************************************************************************************************************/
75/** @def VMM_CHECK_SMAP_SETUP
76 * SMAP check setup. */
77/** @def VMM_CHECK_SMAP_CHECK
78 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
79 * it will be logged and @a a_BadExpr is executed. */
80/** @def VMM_CHECK_SMAP_CHECK2
81 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
82 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
83 * executed. */
84#if defined(VBOX_STRICT) || 1
85# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
86# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
87 do { \
88 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
89 { \
90 RTCCUINTREG fEflCheck = ASMGetFlags(); \
91 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
92 { /* likely */ } \
93 else \
94 { \
95 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
96 a_BadExpr; \
97 } \
98 } \
99 } while (0)
100# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
101 do { \
102 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
103 { \
104 RTCCUINTREG fEflCheck = ASMGetFlags(); \
105 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
106 { /* likely */ } \
107 else \
108 { \
109 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
110 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
111 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
112 a_BadExpr; \
113 } \
114 } \
115 } while (0)
116#else
117# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
118# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
119# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
120#endif
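/*
 * A minimal, self-contained sketch of the check the VMM_CHECK_SMAP_* macros
 * above perform: when the host kernel uses SMAP, ring-0 code expects
 * EFLAGS.AC to be set on these paths, and a cleared AC is reported before the
 * caller-supplied bail-out expression runs.  Everything below (myGetFlags,
 * mySmapCheck, the plain printf) is an illustrative placeholder, not an
 * IPRT/SUP API.
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>
# include <stdio.h>

# define MY_EFL_AC UINT64_C(0x00040000) /* EFLAGS.AC is bit 18. */

static inline uint64_t myGetFlags(void)
{
    uint64_t fEfl;
    __asm__ __volatile__("pushfq\n\tpopq %0" : "=r" (fEfl)); /* x86-64, GCC/Clang */
    return fEfl;
}

static int mySmapCheck(int fHostUsesSmap)
{
    if (fHostUsesSmap)
    {
        uint64_t const fEfl = myGetFlags();
        if (!(fEfl & MY_EFL_AC))
        {
            printf("%s, line %d: EFLAGS.AC is clear! (%#llx)\n",
                   __FUNCTION__, __LINE__, (unsigned long long)fEfl);
            return -1; /* caller decides what happens next, like a_BadExpr */
        }
    }
    return 0;
}
#endif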
121
122
123/*********************************************************************************************************************************
124* Internal Functions *
125*********************************************************************************************************************************/
126RT_C_DECLS_BEGIN
127#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
128extern uint64_t __udivdi3(uint64_t, uint64_t);
129extern uint64_t __umoddi3(uint64_t, uint64_t);
130#endif
131RT_C_DECLS_END
132
133
134/*********************************************************************************************************************************
135* Global Variables *
136*********************************************************************************************************************************/
137/** Drag in necessary library bits.
138 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
139PFNRT g_VMMR0Deps[] =
140{
141 (PFNRT)RTCrc32,
142 (PFNRT)RTOnce,
143#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
144 (PFNRT)__udivdi3,
145 (PFNRT)__umoddi3,
146#endif
147 NULL
148};
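/*
 * The array above is a link-time trick rather than runtime data: taking the
 * address of a function from a reachable global keeps the linker from
 * dropping it, so modules that bind against VMMR0.r0 at load time can still
 * resolve those runtime symbols.  A bare-bones sketch of the same idiom with
 * made-up names (MYPFN, mySharedHelperA/B) follows.
 */
#if 0 /* illustrative sketch only */
typedef void (*MYPFN)(void);

extern void mySharedHelperA(void); /* hypothetical helpers other modules need */
extern void mySharedHelperB(void);

MYPFN g_MyModuleDeps[] =
{
    mySharedHelperA,
    mySharedHelperB,
    0  /* terminator */
};
#endif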
149
150#ifdef RT_OS_SOLARIS
151/* Dependency information for the native solaris loader. */
152extern "C" { char _depends_on[] = "vboxdrv"; }
153#endif
154
155
156
157/**
158 * Initialize the module.
159 * This is called when we're first loaded.
160 *
161 * @returns 0 on success.
162 * @returns VBox status on failure.
163 * @param hMod Image handle for use in APIs.
164 */
165DECLEXPORT(int) ModuleInit(void *hMod)
166{
167 VMM_CHECK_SMAP_SETUP();
168 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
169
170#ifdef VBOX_WITH_DTRACE_R0
171 /*
172 * The first thing to do is register the static tracepoints.
173 * (Deregistration is automatic.)
174 */
175 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
176 if (RT_FAILURE(rc2))
177 return rc2;
178#endif
179 LogFlow(("ModuleInit:\n"));
180
181#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
182 /*
183 * Display the CMOS debug code.
184 */
185 ASMOutU8(0x72, 0x03);
186 uint8_t bDebugCode = ASMInU8(0x73);
187 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
188 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
189#endif
190
191 /*
192 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
193 */
194 int rc = vmmInitFormatTypes();
195 if (RT_SUCCESS(rc))
196 {
197 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
198 rc = GVMMR0Init();
199 if (RT_SUCCESS(rc))
200 {
201 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
202 rc = GMMR0Init();
203 if (RT_SUCCESS(rc))
204 {
205 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
206 rc = HMR0Init();
207 if (RT_SUCCESS(rc))
208 {
209 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
210 rc = PGMRegisterStringFormatTypes();
211 if (RT_SUCCESS(rc))
212 {
213 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
214#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
215 rc = PGMR0DynMapInit();
216#endif
217 if (RT_SUCCESS(rc))
218 {
219 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
220 rc = IntNetR0Init();
221 if (RT_SUCCESS(rc))
222 {
223#ifdef VBOX_WITH_PCI_PASSTHROUGH
224 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
225 rc = PciRawR0Init();
226#endif
227 if (RT_SUCCESS(rc))
228 {
229 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
230 rc = CPUMR0ModuleInit();
231 if (RT_SUCCESS(rc))
232 {
233#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
234 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
235 rc = vmmR0TripleFaultHackInit();
236 if (RT_SUCCESS(rc))
237#endif
238 {
239 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
240 if (RT_SUCCESS(rc))
241 {
242 LogFlow(("ModuleInit: returns success.\n"));
243 return VINF_SUCCESS;
244 }
245 }
246
247 /*
248 * Bail out.
249 */
250#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
251 vmmR0TripleFaultHackTerm();
252#endif
253 }
254 else
255 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
256#ifdef VBOX_WITH_PCI_PASSTHROUGH
257 PciRawR0Term();
258#endif
259 }
260 else
261 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
262 IntNetR0Term();
263 }
264 else
265 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
266#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
267 PGMR0DynMapTerm();
268#endif
269 }
270 else
271 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
272 PGMDeregisterStringFormatTypes();
273 }
274 else
275 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
276 HMR0Term();
277 }
278 else
279 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
280 GMMR0Term();
281 }
282 else
283 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
284 GVMMR0Term();
285 }
286 else
287 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
288 vmmTermFormatTypes();
289 }
290 else
291 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
292
293 LogFlow(("ModuleInit: failed %Rrc\n", rc));
294 return rc;
295}
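/*
 * ModuleInit above uses the classic "nested success / reverse-order unwind"
 * shape: each subsystem is initialized only when everything before it
 * succeeded, and on failure the ones already up are torn down in reverse
 * order before the first error code is returned.  A stripped-down sketch of
 * that shape with hypothetical subsystems (subsysA/B/C) follows.
 */
#if 0 /* illustrative sketch only */
extern int  subsysAInit(void);  extern void subsysATerm(void);
extern int  subsysBInit(void);  extern void subsysBTerm(void);
extern int  subsysCInit(void);

static int myModuleInit(void)
{
    int rc = subsysAInit();
    if (rc == 0)
    {
        rc = subsysBInit();
        if (rc == 0)
        {
            rc = subsysCInit();
            if (rc == 0)
                return 0;       /* everything is up */
            subsysBTerm();      /* C failed: unwind B ... */
        }
        subsysATerm();          /* ... then A, in reverse order */
    }
    return rc;                  /* first failure wins */
}
#endif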
296
297
298/**
299 * Terminate the module.
300 * This is called when we're finally unloaded.
301 *
302 * @param hMod Image handle for use in APIs.
303 */
304DECLEXPORT(void) ModuleTerm(void *hMod)
305{
306 NOREF(hMod);
307 LogFlow(("ModuleTerm:\n"));
308
309 /*
310 * Terminate the CPUM module (Local APIC cleanup).
311 */
312 CPUMR0ModuleTerm();
313
314 /*
315 * Terminate the internal network service.
316 */
317 IntNetR0Term();
318
319 /*
320 * PGM (Darwin), HM and PciRaw global cleanup.
321 */
322#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
323 PGMR0DynMapTerm();
324#endif
325#ifdef VBOX_WITH_PCI_PASSTHROUGH
326 PciRawR0Term();
327#endif
328 PGMDeregisterStringFormatTypes();
329 HMR0Term();
330#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
331 vmmR0TripleFaultHackTerm();
332#endif
333
334 /*
335 * Destroy the GMM and GVMM instances.
336 */
337 GMMR0Term();
338 GVMMR0Term();
339
340 vmmTermFormatTypes();
341
342 LogFlow(("ModuleTerm: returns\n"));
343}
344
345
346/**
347 * Initiates the R0 driver for a particular VM instance.
348 *
349 * @returns VBox status code.
350 *
351 * @param pGVM The global (ring-0) VM structure.
352 * @param pVM The cross context VM structure.
353 * @param uSvnRev The SVN revision of the ring-3 part.
354 * @param uBuildType Build type indicator.
355 * @thread EMT(0)
356 */
357static int vmmR0InitVM(PGVM pGVM, PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
358{
359 VMM_CHECK_SMAP_SETUP();
360 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
361
362 /*
363 * Match the SVN revisions and build type.
364 */
365 if (uSvnRev != VMMGetSvnRev())
366 {
367 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
368 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
369 return VERR_VMM_R0_VERSION_MISMATCH;
370 }
371 if (uBuildType != vmmGetBuildType())
372 {
373 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
374 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
375 return VERR_VMM_R0_VERSION_MISMATCH;
376 }
377
378 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0 /*idCpu*/);
379 if (RT_FAILURE(rc))
380 return rc;
381
382
383#ifdef LOG_ENABLED
384 /*
385 * Register the EMT R0 logger instance for VCPU 0.
386 */
387 PVMCPU pVCpu = &pVM->aCpus[0];
388
389 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
390 if (pR0Logger)
391 {
392# if 0 /* testing of the logger. */
393 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
394 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
395 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
396 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
397
398 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
399 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
400 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
401 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
402
403 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
404 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
405 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
406 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
407
408 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
409 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
410 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
411 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
412 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
413 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
414
415 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
416 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
417
418 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
419 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
420 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
421# endif
422 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
423 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
424 pR0Logger->fRegistered = true;
425 }
426#endif /* LOG_ENABLED */
427
428 /*
429 * Check if the host supports high resolution timers or not.
430 */
431 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
432 && !RTTimerCanDoHighResolution())
433 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
434
435 /*
436 * Initialize the per VM data for GVMM and GMM.
437 */
438 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
439 rc = GVMMR0InitVM(pGVM);
440// if (RT_SUCCESS(rc))
441// rc = GMMR0InitPerVMData(pVM);
442 if (RT_SUCCESS(rc))
443 {
444 /*
445 * Init HM, CPUM and PGM (Darwin only).
446 */
447 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
448 rc = HMR0InitVM(pVM);
449 if (RT_SUCCESS(rc))
450 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */
451 if (RT_SUCCESS(rc))
452 {
453 rc = CPUMR0InitVM(pVM);
454 if (RT_SUCCESS(rc))
455 {
456 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
457#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
458 rc = PGMR0DynMapInitVM(pVM);
459#endif
460 if (RT_SUCCESS(rc))
461 {
462 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
463#ifdef VBOX_WITH_PCI_PASSTHROUGH
464 rc = PciRawR0InitVM(pGVM, pVM);
465#endif
466 if (RT_SUCCESS(rc))
467 {
468 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
469 rc = GIMR0InitVM(pVM);
470 if (RT_SUCCESS(rc))
471 {
472 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
473 if (RT_SUCCESS(rc))
474 {
475 GVMMR0DoneInitVM(pGVM);
476 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
477 return rc;
478 }
479
480 /* bail out */
481 GIMR0TermVM(pVM);
482 }
483#ifdef VBOX_WITH_PCI_PASSTHROUGH
484 PciRawR0TermVM(pGVM, pVM);
485#endif
486 }
487 }
488 }
489 HMR0TermVM(pVM);
490 }
491 }
492
493 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
494 return rc;
495}
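/*
 * The revision/build-type comparison at the top of vmmR0InitVM is a simple
 * handshake: ring-3 reports the values it was built with, ring-0 compares
 * them against its own compiled-in values and refuses to continue on any
 * mismatch, so the two halves never run with incompatible internal layouts.
 * The sketch below uses placeholder constants (MY_SVN_REV, MY_BUILD_TYPE) and
 * a made-up error code rather than the real VMMGetSvnRev()/vmmGetBuildType()
 * values.
 */
#if 0 /* illustrative sketch only */
# define MY_SVN_REV              69111u /* value this side was built from */
# define MY_BUILD_TYPE           0u     /* opaque build-flavor identifier */
# define MY_ERR_VERSION_MISMATCH (-1)

static int myCheckR3R0Match(unsigned uSvnRevR3, unsigned uBuildTypeR3)
{
    if (uSvnRevR3 != MY_SVN_REV)
        return MY_ERR_VERSION_MISMATCH;
    if (uBuildTypeR3 != MY_BUILD_TYPE)
        return MY_ERR_VERSION_MISMATCH;
    return 0;
}
#endif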
496
497
498/**
499 * Terminates the R0 bits for a particular VM instance.
500 *
501 * This is normally called by ring-3 as part of the VM termination process, but
502 * may alternatively be called during the support driver session cleanup when
503 * the VM object is destroyed (see GVMM).
504 *
505 * @returns VBox status code.
506 *
507 * @param pGVM The global (ring-0) VM structure.
508 * @param pVM The cross context VM structure.
509 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
510 * thread.
511 * @thread EMT(0) or session clean up thread.
512 */
513VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, PVM pVM, VMCPUID idCpu)
514{
515 /*
516 * Check EMT(0) claim if we're called from userland.
517 */
518 if (idCpu != NIL_VMCPUID)
519 {
520 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
521 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
522 if (RT_FAILURE(rc))
523 return rc;
524 }
525
526#ifdef VBOX_WITH_PCI_PASSTHROUGH
527 PciRawR0TermVM(pGVM, pVM);
528#endif
529
530 /*
531 * Tell GVMM what we're up to and check that we only do this once.
532 */
533 if (GVMMR0DoingTermVM(pGVM))
534 {
535 GIMR0TermVM(pVM);
536
537 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
538 * here to make sure we don't leak any shared pages if we crash... */
539#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
540 PGMR0DynMapTermVM(pVM);
541#endif
542 HMR0TermVM(pVM);
543 }
544
545 /*
546 * Deregister the logger.
547 */
548 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
549 return VINF_SUCCESS;
550}
551
552
553/**
554 * VMM ring-0 thread-context callback.
555 *
556 * This does common HM state updating and calls the HM-specific thread-context
557 * callback.
558 *
559 * @param enmEvent The thread-context event.
560 * @param pvUser Opaque pointer to the VMCPU.
561 *
562 * @thread EMT(pvUser)
563 */
564static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
565{
566 PVMCPU pVCpu = (PVMCPU)pvUser;
567
568 switch (enmEvent)
569 {
570 case RTTHREADCTXEVENT_IN:
571 {
572 /*
573 * Linux may call us with preemption enabled (really!) but technically we
574 * cannot get preempted here, otherwise we end up in an infinite recursion
575 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
576 * ad infinitum). Let's just disable preemption for now...
577 */
578 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
579 * preemption after doing the callout (one or two functions up the
580 * call chain). */
581 /** @todo r=ramshankar: See @bugref{5313#c30}. */
582 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
583 RTThreadPreemptDisable(&ParanoidPreemptState);
584
585 /* We need to update the VCPU <-> host CPU mapping. */
586 RTCPUID idHostCpu;
587 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
588 pVCpu->iHostCpuSet = iHostCpuSet;
589 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
590
591 /* In the very unlikely event that the GIP delta for the CPU we're
592 rescheduled on needs calculating, try to force a return to ring-3.
593 We unfortunately cannot do the measurements right here. */
594 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
595 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
596
597 /* Invoke the HM-specific thread-context callback. */
598 HMR0ThreadCtxCallback(enmEvent, pvUser);
599
600 /* Restore preemption. */
601 RTThreadPreemptRestore(&ParanoidPreemptState);
602 break;
603 }
604
605 case RTTHREADCTXEVENT_OUT:
606 {
607 /* Invoke the HM-specific thread-context callback. */
608 HMR0ThreadCtxCallback(enmEvent, pvUser);
609
610 /*
611 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
612 * have the same host CPU associated with it.
613 */
614 pVCpu->iHostCpuSet = UINT32_MAX;
615 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
616 break;
617 }
618
619 default:
620 /* Invoke the HM-specific thread-context callback. */
621 HMR0ThreadCtxCallback(enmEvent, pvUser);
622 break;
623 }
624}
625
626
627/**
628 * Creates thread switching hook for the current EMT thread.
629 *
630 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
631 * platform does not implement switcher hooks, no hooks will be created and
632 * the member is set to NIL_RTTHREADCTXHOOK.
633 *
634 * @returns VBox status code.
635 * @param pVCpu The cross context virtual CPU structure.
636 * @thread EMT(pVCpu)
637 */
638VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
639{
640 VMCPU_ASSERT_EMT(pVCpu);
641 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
642
643#if 1 /* To disable this stuff change to zero. */
644 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
645 if (RT_SUCCESS(rc))
646 return rc;
647#else
648 RT_NOREF(vmmR0ThreadCtxCallback);
649 int rc = VERR_NOT_SUPPORTED;
650#endif
651
652 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
653 if (rc == VERR_NOT_SUPPORTED)
654 return VINF_SUCCESS;
655
656 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
657 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
658}
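/*
 * For reference, the usual lifecycle of such a hook as driven by the code in
 * this file: create it once per EMT, enable it around the code that must
 * track host-CPU migrations, then disable and destroy it again.  The IPRT
 * calls are the ones used above; myCtxCallback and pvMyUser are placeholders,
 * and the headers are the ones already included at the top of this file.
 */
#if 0 /* illustrative sketch only */
static DECLCALLBACK(void) myCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
{
    RT_NOREF(pvUser);
    if (enmEvent == RTTHREADCTXEVENT_IN)
    {
        /* Scheduled in on some host CPU: refresh per-CPU state here. */
    }
    else if (enmEvent == RTTHREADCTXEVENT_OUT)
    {
        /* About to be scheduled out: invalidate per-CPU state here. */
    }
}

static int myUseCtxHook(void *pvMyUser)
{
    RTTHREADCTXHOOK hHook = NIL_RTTHREADCTXHOOK;
    int rc = RTThreadCtxHookCreate(&hHook, 0 /*fFlags*/, myCtxCallback, pvMyUser);
    if (rc == VERR_NOT_SUPPORTED)
        return VINF_SUCCESS;            /* fine to run without hooks, as above */
    if (RT_SUCCESS(rc))
    {
        rc = RTThreadCtxHookEnable(hHook);
        /* ... code that needs the IN/OUT notifications runs here ... */
        RTThreadCtxHookDisable(hHook);
        RTThreadCtxHookDestroy(hHook);
    }
    return rc;
}
#endif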
659
660
661/**
662 * Destroys the thread switching hook for the specified VCPU.
663 *
664 * @param pVCpu The cross context virtual CPU structure.
665 * @remarks Can be called from any thread.
666 */
667VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
668{
669 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
670 AssertRC(rc);
671 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
672}
673
674
675/**
676 * Disables the thread switching hook for this VCPU (if we got one).
677 *
678 * @param pVCpu The cross context virtual CPU structure.
679 * @thread EMT(pVCpu)
680 *
681 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
682 * this call. This means you have to be careful with what you do!
683 */
684VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
685{
686 /*
687 * Clear the VCPU <-> host CPU mapping as we've left HM context.
688 * @bugref{7726#c19} explains the need for this trick:
689 *
690 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
691 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disable context hooks during
692 * longjmp & normal return to ring-3, which opens a window where we may be
693 * rescheduled without changing VMCPU::idHostCpu and cause confusion if
694 * the CPU starts executing a different EMT. Both functions first disable
695 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
696 * an opening for getting preempted.
697 */
698 /** @todo Make HM not need this API! Then we could leave the hooks enabled
699 * all the time. */
700 /** @todo move this into the context hook disabling if(). */
701 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
702
703 /*
704 * Disable the context hook, if we got one.
705 */
706 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
707 {
708 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
709 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
710 AssertRC(rc);
711 }
712}
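/*
 * The ordering argued for in the comment above, reduced to its essentials:
 * the published "which host CPU is this EMT on" value is invalidated before
 * the hook that keeps it current is switched off, so a preemption in the gap
 * can no longer leave a stale mapping behind.  myStopCpuTracking and the bare
 * pointer parameter are illustrative, not a VMM API.
 */
#if 0 /* illustrative sketch only */
static void myStopCpuTracking(uint32_t volatile *pidHostCpu, RTTHREADCTXHOOK hHook)
{
    ASMAtomicWriteU32(pidHostCpu, NIL_RTCPUID);     /* 1) invalidate first */
    if (hHook != NIL_RTTHREADCTXHOOK)
        RTThreadCtxHookDisable(hHook);              /* 2) then stop the updates */
}
#endif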
713
714
715/**
716 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
717 *
718 * @returns true if registered, false otherwise.
719 * @param pVCpu The cross context virtual CPU structure.
720 */
721DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
722{
723 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
724}
725
726
727/**
728 * Whether thread-context hooks are registered for this VCPU.
729 *
730 * @returns true if registered, false otherwise.
731 * @param pVCpu The cross context virtual CPU structure.
732 */
733VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
734{
735 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
736}
737
738
739#ifdef VBOX_WITH_STATISTICS
740/**
741 * Record return code statistics
742 * @param pVM The cross context VM structure.
743 * @param pVCpu The cross context virtual CPU structure.
744 * @param rc The status code.
745 */
746static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
747{
748 /*
749 * Collect statistics.
750 */
751 switch (rc)
752 {
753 case VINF_SUCCESS:
754 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
755 break;
756 case VINF_EM_RAW_INTERRUPT:
757 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
758 break;
759 case VINF_EM_RAW_INTERRUPT_HYPER:
760 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
761 break;
762 case VINF_EM_RAW_GUEST_TRAP:
763 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
764 break;
765 case VINF_EM_RAW_RING_SWITCH:
766 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
767 break;
768 case VINF_EM_RAW_RING_SWITCH_INT:
769 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
770 break;
771 case VINF_EM_RAW_STALE_SELECTOR:
772 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
773 break;
774 case VINF_EM_RAW_IRET_TRAP:
775 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
776 break;
777 case VINF_IOM_R3_IOPORT_READ:
778 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
779 break;
780 case VINF_IOM_R3_IOPORT_WRITE:
781 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
782 break;
783 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
784 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
785 break;
786 case VINF_IOM_R3_MMIO_READ:
787 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
788 break;
789 case VINF_IOM_R3_MMIO_WRITE:
790 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
791 break;
792 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
793 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
794 break;
795 case VINF_IOM_R3_MMIO_READ_WRITE:
796 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
797 break;
798 case VINF_PATM_HC_MMIO_PATCH_READ:
799 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
800 break;
801 case VINF_PATM_HC_MMIO_PATCH_WRITE:
802 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
803 break;
804 case VINF_CPUM_R3_MSR_READ:
805 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
806 break;
807 case VINF_CPUM_R3_MSR_WRITE:
808 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
809 break;
810 case VINF_EM_RAW_EMULATE_INSTR:
811 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
812 break;
813 case VINF_EM_RAW_EMULATE_IO_BLOCK:
814 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
815 break;
816 case VINF_PATCH_EMULATE_INSTR:
817 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
818 break;
819 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
820 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
821 break;
822 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
823 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
824 break;
825 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
826 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
827 break;
828 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
829 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
830 break;
831 case VINF_CSAM_PENDING_ACTION:
832 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
833 break;
834 case VINF_PGM_SYNC_CR3:
835 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
836 break;
837 case VINF_PATM_PATCH_INT3:
838 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
839 break;
840 case VINF_PATM_PATCH_TRAP_PF:
841 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
842 break;
843 case VINF_PATM_PATCH_TRAP_GP:
844 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
845 break;
846 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
847 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
848 break;
849 case VINF_EM_RESCHEDULE_REM:
850 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
851 break;
852 case VINF_EM_RAW_TO_R3:
853 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
854 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
855 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
856 else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
857 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
858 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
859 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
860 else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
861 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
862 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
863 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
864 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
865 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
866 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
867 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
868 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
869 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
870 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
871 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
872 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
873 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
874 else
875 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
876 break;
877
878 case VINF_EM_RAW_TIMER_PENDING:
879 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
880 break;
881 case VINF_EM_RAW_INTERRUPT_PENDING:
882 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
883 break;
884 case VINF_VMM_CALL_HOST:
885 switch (pVCpu->vmm.s.enmCallRing3Operation)
886 {
887 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
888 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
889 break;
890 case VMMCALLRING3_PDM_LOCK:
891 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
892 break;
893 case VMMCALLRING3_PGM_POOL_GROW:
894 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
895 break;
896 case VMMCALLRING3_PGM_LOCK:
897 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
898 break;
899 case VMMCALLRING3_PGM_MAP_CHUNK:
900 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
901 break;
902 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
903 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
904 break;
905 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
906 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
907 break;
908 case VMMCALLRING3_VMM_LOGGER_FLUSH:
909 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
910 break;
911 case VMMCALLRING3_VM_SET_ERROR:
912 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
913 break;
914 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
915 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
916 break;
917 case VMMCALLRING3_VM_R0_ASSERTION:
918 default:
919 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
920 break;
921 }
922 break;
923 case VINF_PATM_DUPLICATE_FUNCTION:
924 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
925 break;
926 case VINF_PGM_CHANGE_MODE:
927 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
928 break;
929 case VINF_PGM_POOL_FLUSH_PENDING:
930 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
931 break;
932 case VINF_EM_PENDING_REQUEST:
933 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
934 break;
935 case VINF_EM_HM_PATCH_TPR_INSTR:
936 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
937 break;
938 default:
939 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
940 break;
941 }
942}
943#endif /* VBOX_WITH_STATISTICS */
944
945
946/**
947 * The Ring 0 entry point, called by the fast-ioctl path.
948 *
949 * @param pGVM The global (ring-0) VM structure.
950 * @param pVM The cross context VM structure.
951 * The return code is stored in pVM->vmm.s.iLastGZRc.
952 * @param idCpu The Virtual CPU ID of the calling EMT.
953 * @param enmOperation Which operation to execute.
954 * @remarks Assume called with interrupts _enabled_.
955 */
956VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
957{
958 /*
959 * Validation.
960 */
961 if ( idCpu < pGVM->cCpus
962 && pGVM->cCpus == pVM->cCpus)
963 { /*likely*/ }
964 else
965 {
966 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x/%#x\n", idCpu, pGVM->cCpus, pVM->cCpus);
967 return;
968 }
969
970 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
971 PVMCPU pVCpu = &pVM->aCpus[idCpu];
972 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
973 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
974 && pVCpu->hNativeThreadR0 == hNativeThread))
975 { /* likely */ }
976 else
977 {
978 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pVCpu->hNativeThreadR0=%p\n",
979 idCpu, hNativeThread, pGVCpu->hEMT, pVCpu->hNativeThreadR0);
980 return;
981 }
982
983 /*
984 * SMAP fun.
985 */
986 VMM_CHECK_SMAP_SETUP();
987 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
988
989 /*
990 * Perform requested operation.
991 */
992 switch (enmOperation)
993 {
994 /*
995 * Switch to GC and run guest raw mode code.
996 * Disable interrupts before doing the world switch.
997 */
998 case VMMR0_DO_RAW_RUN:
999 {
1000#ifdef VBOX_WITH_RAW_MODE
1001# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1002 /* Some safety precautions first. */
1003 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1004 {
1005 pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
1006 break;
1007 }
1008# endif
1009
1010 /*
1011 * Disable preemption.
1012 */
1013 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1014 RTThreadPreemptDisable(&PreemptState);
1015
1016 /*
1017 * Get the host CPU identifiers, make sure they are valid and that
1018 * we've got a TSC delta for the CPU.
1019 */
1020 RTCPUID idHostCpu;
1021 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1022 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1023 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1024 {
1025 /*
1026 * Commit the CPU identifiers and update the periodic preemption timer if it's active.
1027 */
1028# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1029 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1030# endif
1031 pVCpu->iHostCpuSet = iHostCpuSet;
1032 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1033
1034 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1035 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1036
1037 /*
1038 * We might need to disable VT-x if the active switcher turns off paging.
1039 */
1040 bool fVTxDisabled;
1041 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1042 if (RT_SUCCESS(rc))
1043 {
1044 /*
1045 * Disable interrupts and run raw-mode code. The loop is for efficiently
1046 * dispatching tracepoints that fired in raw-mode context.
1047 */
1048 RTCCUINTREG uFlags = ASMIntDisableFlags();
1049
1050 for (;;)
1051 {
1052 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
1053 TMNotifyStartOfExecution(pVCpu);
1054
1055 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1056 pVCpu->vmm.s.iLastGZRc = rc;
1057
1058 TMNotifyEndOfExecution(pVCpu);
1059 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1060
1061 if (rc != VINF_VMM_CALL_TRACER)
1062 break;
1063 SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
1064 }
1065
1066 /*
1067 * Re-enable VT-x before we dispatch any pending host interrupts and
1068 * re-enable interrupts.
1069 */
1070 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1071
1072 if ( rc == VINF_EM_RAW_INTERRUPT
1073 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1074 TRPMR0DispatchHostInterrupt(pVM);
1075
1076 ASMSetFlags(uFlags);
1077
1078 /* Fire dtrace probe and collect statistics. */
1079 VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1080# ifdef VBOX_WITH_STATISTICS
1081 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1082 vmmR0RecordRC(pVM, pVCpu, rc);
1083# endif
1084 }
1085 else
1086 pVCpu->vmm.s.iLastGZRc = rc;
1087
1088 /*
1089 * Invalidate the host CPU identifiers as we restore preemption.
1090 */
1091 pVCpu->iHostCpuSet = UINT32_MAX;
1092 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1093
1094 RTThreadPreemptRestore(&PreemptState);
1095 }
1096 /*
1097 * Invalid CPU set index or TSC delta in need of measuring.
1098 */
1099 else
1100 {
1101 RTThreadPreemptRestore(&PreemptState);
1102 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1103 {
1104 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1105 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1106 0 /*default cTries*/);
1107 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1108 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1109 else
1110 pVCpu->vmm.s.iLastGZRc = rc;
1111 }
1112 else
1113 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1114 }
1115
1116#else /* !VBOX_WITH_RAW_MODE */
1117 pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_NOT_SUPPORTED;
1118#endif
1119 break;
1120 }
1121
1122 /*
1123 * Run guest code using the available hardware acceleration technology.
1124 */
1125 case VMMR0_DO_HM_RUN:
1126 {
1127 /*
1128 * Disable preemption.
1129 */
1130 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1131 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1132 RTThreadPreemptDisable(&PreemptState);
1133
1134 /*
1135 * Get the host CPU identifiers, make sure they are valid and that
1136 * we've got a TSC delta for the CPU.
1137 */
1138 RTCPUID idHostCpu;
1139 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1140 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1141 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1142 {
1143 pVCpu->iHostCpuSet = iHostCpuSet;
1144 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1145
1146 /*
1147 * Update the periodic preemption timer if it's active.
1148 */
1149 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1150 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1151 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1152
1153#ifdef LOG_ENABLED
1154 /*
1155 * Ugly: Lazy registration of ring 0 loggers.
1156 */
1157 if (pVCpu->idCpu > 0)
1158 {
1159 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
1160 if ( pR0Logger
1161 && RT_UNLIKELY(!pR0Logger->fRegistered))
1162 {
1163 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
1164 pR0Logger->fRegistered = true;
1165 }
1166 }
1167#endif
1168
1169#ifdef VMM_R0_TOUCH_FPU
1170 /*
1171 * Make sure we've got the FPU state loaded so we don't need to clear
1172 * CR0.TS and get out of sync with the host kernel when loading the guest
1173 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1174 */
1175 CPUMR0TouchHostFpu();
1176#endif
1177 int rc;
1178 bool fPreemptRestored = false;
1179 if (!HMR0SuspendPending())
1180 {
1181 /*
1182 * Enable the context switching hook.
1183 */
1184 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1185 {
1186 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1187 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1188 }
1189
1190 /*
1191 * Enter HM context.
1192 */
1193 rc = HMR0Enter(pVM, pVCpu);
1194 if (RT_SUCCESS(rc))
1195 {
1196 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1197
1198 /*
1199 * When preemption hooks are in place, enable preemption now that
1200 * we're in HM context.
1201 */
1202 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1203 {
1204 fPreemptRestored = true;
1205 RTThreadPreemptRestore(&PreemptState);
1206 }
1207
1208 /*
1209 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1210 */
1211 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1212 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1213 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1214
1215 /*
1216 * Assert sanity on the way out. Using manual assertion code here as normal
1217 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1218 */
1219 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1220 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1221 {
1222 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1223 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1224 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1225 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1226 }
1227 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1228 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1229 {
1230 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1231 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1232 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1233 rc = VERR_INVALID_STATE;
1234 }
1235
1236 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1237 }
1238 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1239
1240 /*
1241 * Invalidate the host CPU identifiers before we disable the context
1242 * hook / restore preemption.
1243 */
1244 pVCpu->iHostCpuSet = UINT32_MAX;
1245 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1246
1247 /*
1248 * Disable context hooks. Due to unresolved cleanup issues, we
1249 * cannot leave the hooks enabled when we return to ring-3.
1250 *
1251 * Note! At the moment HM may also have disabled the hook
1252 * when we get here, but the IPRT API handles that.
1253 */
1254 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1255 {
1256 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1257 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1258 }
1259 }
1260 /*
1261 * The system is about to go into suspend mode; go back to ring 3.
1262 */
1263 else
1264 {
1265 rc = VINF_EM_RAW_INTERRUPT;
1266 pVCpu->iHostCpuSet = UINT32_MAX;
1267 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1268 }
1269
1270 /** @todo When HM stops messing with the context hook state, we'll disable
1271 * preemption again before the RTThreadCtxHookDisable call. */
1272 if (!fPreemptRestored)
1273 RTThreadPreemptRestore(&PreemptState);
1274
1275 pVCpu->vmm.s.iLastGZRc = rc;
1276
1277 /* Fire dtrace probe and collect statistics. */
1278 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1279#ifdef VBOX_WITH_STATISTICS
1280 vmmR0RecordRC(pVM, pVCpu, rc);
1281#endif
1282 }
1283 /*
1284 * Invalid CPU set index or TSC delta in need of measuring.
1285 */
1286 else
1287 {
1288 pVCpu->iHostCpuSet = UINT32_MAX;
1289 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1290 RTThreadPreemptRestore(&PreemptState);
1291 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1292 {
1293 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1294 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1295 0 /*default cTries*/);
1296 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1297 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1298 else
1299 pVCpu->vmm.s.iLastGZRc = rc;
1300 }
1301 else
1302 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1303 }
1304 break;
1305 }
1306
1307 /*
1308 * For profiling.
1309 */
1310 case VMMR0_DO_NOP:
1311 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1312 break;
1313
1314 /*
1315 * Impossible.
1316 */
1317 default:
1318 AssertMsgFailed(("%#x\n", enmOperation));
1319 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1320 break;
1321 }
1322 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1323}
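/*
 * Both run loops above bracket guest execution the same way: preemption is
 * disabled, the EMT publishes which host CPU it sits on (needed for per-CPU
 * VT-x/AMD-V state and the TSC delta), the guest is run, and the identifiers
 * are invalidated again before preemption is restored.  The sketch keeps only
 * that bracket; myRunGuest stands in for the real HM/raw-mode run call.
 */
#if 0 /* illustrative sketch only */
extern int myRunGuest(PVMCPU pVCpu); /* hypothetical stand-in */

static int myRunOnPinnedCpu(PVMCPU pVCpu)
{
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);

    RTCPUID  idHostCpu;
    uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
    pVCpu->iHostCpuSet   = iHostCpuSet;
    ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);

    int rc = myRunGuest(pVCpu);

    pVCpu->iHostCpuSet = UINT32_MAX;                   /* invalidate before ... */
    ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
    RTThreadPreemptRestore(&PreemptState);             /* ... we may migrate again */
    return rc;
}
#endif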
1324
1325
1326/**
1327 * Validates a session or VM session argument.
1328 *
1329 * @returns true / false accordingly.
1330 * @param pVM The cross context VM structure.
1331 * @param pClaimedSession The session claim to validate.
1332 * @param pSession The session argument.
1333 */
1334DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1335{
1336 /* This must be set! */
1337 if (!pSession)
1338 return false;
1339
1340 /* Only one out of the two. */
1341 if (pVM && pClaimedSession)
1342 return false;
1343 if (pVM)
1344 pClaimedSession = pVM->pSession;
1345 return pClaimedSession == pSession;
1346}
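/*
 * How the dispatcher below typically uses this helper: a request that carries
 * its own session pointer is only accepted when that session matches the one
 * the caller is actually using (or the VM's own session when a VM is given).
 * MYREQ and myHandleReq are made-up illustrations, not one of the real
 * VMMR0_DO_* request handlers.
 */
#if 0 /* illustrative sketch only */
typedef struct MYREQ
{
    PSUPDRVSESSION pSession; /* session the requester claims to be in */
    /* request payload would follow here */
} MYREQ;

static int myHandleReq(PVM pVM, MYREQ *pReq, PSUPDRVSESSION pSession)
{
    if (!pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession))
        return VERR_INVALID_PARAMETER;
    /* ... act on the validated request ... */
    return VINF_SUCCESS;
}
#endif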
1347
1348
1349/**
1350 * VMMR0EntryEx worker function, either called directly or whenever possible
1351 * called thru a longjmp so we can exit safely on failure.
1352 *
1353 * @returns VBox status code.
1354 * @param pGVM The global (ring-0) VM structure.
1355 * @param pVM The cross context VM structure.
1356 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1357 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1358 * @param enmOperation Which operation to execute.
1359 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1360 * The support driver validates this if it's present.
1361 * @param u64Arg Some simple constant argument.
1362 * @param pSession The session of the caller.
1363 *
1364 * @remarks Assume called with interrupts _enabled_.
1365 */
1366static int vmmR0EntryExWorker(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1367 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1368{
1369 /*
1370 * Validate pGVM, pVM and idCpu for consistency and validity.
1371 */
1372 if ( pGVM != NULL
1373 || pVM != NULL)
1374 {
1375 if (RT_LIKELY( RT_VALID_PTR(pGVM)
1376 && RT_VALID_PTR(pVM)
1377 && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0))
1378 { /* likely */ }
1379 else
1380 {
1381 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p and/or pVM=%p! (op=%d)\n", pGVM, pVM, enmOperation);
1382 return VERR_INVALID_POINTER;
1383 }
1384
1385 if (RT_LIKELY(pGVM->pVM == pVM))
1386 { /* likely */ }
1387 else
1388 {
1389 SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM->pVM=%p\n", pVM, pGVM->pVM);
1390 return VERR_INVALID_PARAMETER;
1391 }
1392
1393 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1394 { /* likely */ }
1395 else
1396 {
1397 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1398 return VERR_INVALID_PARAMETER;
1399 }
1400
1401 if (RT_LIKELY( pVM->enmVMState >= VMSTATE_CREATING
1402 && pVM->enmVMState <= VMSTATE_TERMINATED
1403 && pVM->cCpus == pGVM->cCpus
1404 && pVM->pSession == pSession
1405 && pVM->pVMR0 == pVM))
1406 { /* likely */ }
1407 else
1408 {
1409 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{.enmVMState=%d, .cCpus=%#x(==%#x), .pSession=%p(==%p), .pVMR0=%p(==%p)}! (op=%d)\n",
1410 pVM, pVM->enmVMState, pVM->cCpus, pGVM->cCpus, pVM->pSession, pSession, pVM->pVMR0, pVM, enmOperation);
1411 return VERR_INVALID_POINTER;
1412 }
1413 }
1414 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1415 { /* likely */ }
1416 else
1417 {
1418 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1419 return VERR_INVALID_PARAMETER;
1420 }
1421
1422 /*
1423 * SMAP fun.
1424 */
1425 VMM_CHECK_SMAP_SETUP();
1426 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1427
1428 /*
1429 * Process the request.
1430 */
1431 int rc;
1432 switch (enmOperation)
1433 {
1434 /*
1435 * GVM requests
1436 */
1437 case VMMR0_DO_GVMM_CREATE_VM:
1438 if (pGVM == NULL && pVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1439 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1440 else
1441 rc = VERR_INVALID_PARAMETER;
1442 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1443 break;
1444
1445 case VMMR0_DO_GVMM_DESTROY_VM:
1446 if (pReqHdr == NULL && u64Arg == 0)
1447 rc = GVMMR0DestroyVM(pGVM, pVM);
1448 else
1449 rc = VERR_INVALID_PARAMETER;
1450 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1451 break;
1452
1453 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1454 if (pGVM != NULL && pVM != NULL)
1455 rc = GVMMR0RegisterVCpu(pGVM, pVM, idCpu);
1456 else
1457 rc = VERR_INVALID_PARAMETER;
1458 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1459 break;
1460
1461 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1462 if (pGVM != NULL && pVM != NULL)
1463 rc = GVMMR0DeregisterVCpu(pGVM, pVM, idCpu);
1464 else
1465 rc = VERR_INVALID_PARAMETER;
1466 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1467 break;
1468
1469 case VMMR0_DO_GVMM_SCHED_HALT:
1470 if (pReqHdr)
1471 return VERR_INVALID_PARAMETER;
1472 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1473 rc = GVMMR0SchedHalt(pGVM, pVM, idCpu, u64Arg);
1474 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1475 break;
1476
1477 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1478 if (pReqHdr || u64Arg)
1479 return VERR_INVALID_PARAMETER;
1480 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1481 rc = GVMMR0SchedWakeUp(pGVM, pVM, idCpu);
1482 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1483 break;
1484
1485 case VMMR0_DO_GVMM_SCHED_POKE:
1486 if (pReqHdr || u64Arg)
1487 return VERR_INVALID_PARAMETER;
1488 rc = GVMMR0SchedPoke(pGVM, pVM, idCpu);
1489 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1490 break;
1491
1492 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1493 if (u64Arg)
1494 return VERR_INVALID_PARAMETER;
1495 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1496 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1497 break;
1498
1499 case VMMR0_DO_GVMM_SCHED_POLL:
1500 if (pReqHdr || u64Arg > 1)
1501 return VERR_INVALID_PARAMETER;
1502 rc = GVMMR0SchedPoll(pGVM, pVM, idCpu, !!u64Arg);
1503 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1504 break;
1505
1506 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1507 if (u64Arg)
1508 return VERR_INVALID_PARAMETER;
1509 rc = GVMMR0QueryStatisticsReq(pGVM, pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1510 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1511 break;
1512
1513 case VMMR0_DO_GVMM_RESET_STATISTICS:
1514 if (u64Arg)
1515 return VERR_INVALID_PARAMETER;
1516 rc = GVMMR0ResetStatisticsReq(pGVM, pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1517 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1518 break;
1519
1520 /*
1521 * Initialize the R0 part of a VM instance.
1522 */
1523 case VMMR0_DO_VMMR0_INIT:
1524 rc = vmmR0InitVM(pGVM, pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1525 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1526 break;
1527
1528 /*
1529 * Terminate the R0 part of a VM instance.
1530 */
1531 case VMMR0_DO_VMMR0_TERM:
1532 rc = VMMR0TermVM(pGVM, pVM, 0 /*idCpu*/);
1533 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1534 break;
1535
1536 /*
1537 * Attempt to enable hm mode and check the current setting.
1538 */
1539 case VMMR0_DO_HM_ENABLE:
1540 rc = HMR0EnableAllCpus(pVM);
1541 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1542 break;
1543
1544 /*
1545 * Setup the hardware accelerated session.
1546 */
1547 case VMMR0_DO_HM_SETUP_VM:
1548 rc = HMR0SetupVM(pVM);
1549 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1550 break;
1551
1552 /*
1553 * Switch to RC to execute Hypervisor function.
1554 */
1555 case VMMR0_DO_CALL_HYPERVISOR:
1556 {
1557#ifdef VBOX_WITH_RAW_MODE
1558 /*
1559 * Validate input / context.
1560 */
1561 if (RT_UNLIKELY(idCpu != 0))
1562 return VERR_INVALID_CPU_ID;
1563 if (RT_UNLIKELY(pVM->cCpus != 1))
1564 return VERR_INVALID_PARAMETER;
1565 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1566# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1567 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1568 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1569# endif
1570
1571 /*
1572 * Disable interrupts.
1573 */
1574 RTCCUINTREG fFlags = ASMIntDisableFlags();
1575
1576 /*
1577 * Get the host CPU identifiers, make sure they are valid and that
1578 * we've got a TSC delta for the CPU.
1579 */
1580 RTCPUID idHostCpu;
1581 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1582 if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
1583 {
1584 ASMSetFlags(fFlags);
1585 return VERR_INVALID_CPU_INDEX;
1586 }
1587 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1588 {
1589 ASMSetFlags(fFlags);
1590 rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1591 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1592 0 /*default cTries*/);
1593 if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
1594 {
1595 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1596 return rc;
1597 }
1598 }
1599
1600 /*
1601 * Commit the CPU identifiers.
1602 */
1603# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1604 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1605# endif
1606 pVCpu->iHostCpuSet = iHostCpuSet;
1607 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1608
1609 /*
1610 * We might need to disable VT-x if the active switcher turns off paging.
1611 */
1612 bool fVTxDisabled;
1613 rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1614 if (RT_SUCCESS(rc))
1615 {
1616 /*
1617 * Go through the wormhole...
1618 */
1619 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1620
1621 /*
1622 * Re-enable VT-x before we dispatch any pending host interrupts.
1623 */
1624 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1625
1626 if ( rc == VINF_EM_RAW_INTERRUPT
1627 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1628 TRPMR0DispatchHostInterrupt(pVM);
1629 }
1630
1631 /*
1632 * Invalidate the host CPU identifiers as we restore interrupts.
1633 */
1634 pVCpu->iHostCpuSet = UINT32_MAX;
1635 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1636 ASMSetFlags(fFlags);
1637
1638#else /* !VBOX_WITH_RAW_MODE */
1639 rc = VERR_RAW_MODE_NOT_SUPPORTED;
1640#endif
1641 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1642 break;
1643 }
1644
1645 /*
1646 * PGM wrappers.
1647 */
1648 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1649 if (idCpu == NIL_VMCPUID)
1650 return VERR_INVALID_CPU_ID;
1651 rc = PGMR0PhysAllocateHandyPages(pGVM, pVM, idCpu);
1652 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1653 break;
1654
1655 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1656 if (idCpu == NIL_VMCPUID)
1657 return VERR_INVALID_CPU_ID;
1658 rc = PGMR0PhysFlushHandyPages(pGVM, pVM, idCpu);
1659 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1660 break;
1661
1662 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1663 if (idCpu == NIL_VMCPUID)
1664 return VERR_INVALID_CPU_ID;
1665 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, pVM, idCpu);
1666 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1667 break;
1668
1669 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1670 if (idCpu != 0)
1671 return VERR_INVALID_CPU_ID;
1672 rc = PGMR0PhysSetupIoMmu(pGVM, pVM);
1673 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1674 break;
1675
1676 /*
1677 * GMM wrappers.
1678 */
1679 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1680 if (u64Arg)
1681 return VERR_INVALID_PARAMETER;
1682 rc = GMMR0InitialReservationReq(pGVM, pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1683 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1684 break;
1685
1686 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1687 if (u64Arg)
1688 return VERR_INVALID_PARAMETER;
1689 rc = GMMR0UpdateReservationReq(pGVM, pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1690 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1691 break;
1692
1693 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1694 if (u64Arg)
1695 return VERR_INVALID_PARAMETER;
1696 rc = GMMR0AllocatePagesReq(pGVM, pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1697 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1698 break;
1699
1700 case VMMR0_DO_GMM_FREE_PAGES:
1701 if (u64Arg)
1702 return VERR_INVALID_PARAMETER;
1703 rc = GMMR0FreePagesReq(pGVM, pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1704 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1705 break;
1706
1707 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1708 if (u64Arg)
1709 return VERR_INVALID_PARAMETER;
1710 rc = GMMR0FreeLargePageReq(pGVM, pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1711 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1712 break;
1713
1714 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1715 if (u64Arg)
1716 return VERR_INVALID_PARAMETER;
1717 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1718 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1719 break;
1720
1721 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1722 if (idCpu == NIL_VMCPUID)
1723 return VERR_INVALID_CPU_ID;
1724 if (u64Arg)
1725 return VERR_INVALID_PARAMETER;
1726 rc = GMMR0QueryMemoryStatsReq(pGVM, pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1727 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1728 break;
1729
1730 case VMMR0_DO_GMM_BALLOONED_PAGES:
1731 if (u64Arg)
1732 return VERR_INVALID_PARAMETER;
1733 rc = GMMR0BalloonedPagesReq(pGVM, pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1734 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1735 break;
1736
1737 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1738 if (u64Arg)
1739 return VERR_INVALID_PARAMETER;
1740 rc = GMMR0MapUnmapChunkReq(pGVM, pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1741 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1742 break;
1743
1744 case VMMR0_DO_GMM_SEED_CHUNK:
1745 if (pReqHdr)
1746 return VERR_INVALID_PARAMETER;
1747 rc = GMMR0SeedChunk(pGVM, pVM, idCpu, (RTR3PTR)u64Arg);
1748 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1749 break;
1750
1751 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1752 if (idCpu == NIL_VMCPUID)
1753 return VERR_INVALID_CPU_ID;
1754 if (u64Arg)
1755 return VERR_INVALID_PARAMETER;
1756 rc = GMMR0RegisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1757 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1758 break;
1759
1760 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1761 if (idCpu == NIL_VMCPUID)
1762 return VERR_INVALID_CPU_ID;
1763 if (u64Arg)
1764 return VERR_INVALID_PARAMETER;
1765 rc = GMMR0UnregisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1766 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1767 break;
1768
1769 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1770 if (idCpu == NIL_VMCPUID)
1771 return VERR_INVALID_CPU_ID;
1772 if ( u64Arg
1773 || pReqHdr)
1774 return VERR_INVALID_PARAMETER;
1775 rc = GMMR0ResetSharedModules(pGVM, pVM, idCpu);
1776 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1777 break;
1778
1779#ifdef VBOX_WITH_PAGE_SHARING
1780 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1781 {
1782 if (idCpu == NIL_VMCPUID)
1783 return VERR_INVALID_CPU_ID;
1784 if ( u64Arg
1785 || pReqHdr)
1786 return VERR_INVALID_PARAMETER;
1787 rc = GMMR0CheckSharedModules(pGVM, pVM, idCpu);
1788 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1789 break;
1790 }
1791#endif
1792
1793#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1794 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1795 if (u64Arg)
1796 return VERR_INVALID_PARAMETER;
1797 rc = GMMR0FindDuplicatePageReq(pGVM, pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1798 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1799 break;
1800#endif
1801
1802 case VMMR0_DO_GMM_QUERY_STATISTICS:
1803 if (u64Arg)
1804 return VERR_INVALID_PARAMETER;
1805 rc = GMMR0QueryStatisticsReq(pGVM, pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1806 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1807 break;
1808
1809 case VMMR0_DO_GMM_RESET_STATISTICS:
1810 if (u64Arg)
1811 return VERR_INVALID_PARAMETER;
1812 rc = GMMR0ResetStatisticsReq(pGVM, pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1813 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1814 break;
1815
1816 /*
1817 * A quick GCFGM mock-up.
1818 */
1819 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1820 case VMMR0_DO_GCFGM_SET_VALUE:
1821 case VMMR0_DO_GCFGM_QUERY_VALUE:
1822 {
1823 if (pGVM || pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1824 return VERR_INVALID_PARAMETER;
1825 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1826 if (pReq->Hdr.cbReq != sizeof(*pReq))
1827 return VERR_INVALID_PARAMETER;
1828 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1829 {
1830 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1831 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1832 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1833 }
1834 else
1835 {
1836 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1837 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1838 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1839 }
1840 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1841 break;
1842 }
1843
1844 /*
1845 * PDM Wrappers.
1846 */
1847 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1848 {
1849 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1850 return VERR_INVALID_PARAMETER;
1851 rc = PDMR0DriverCallReqHandler(pGVM, pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1852 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1853 break;
1854 }
1855
1856 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1857 {
1858 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1859 return VERR_INVALID_PARAMETER;
1860 rc = PDMR0DeviceCallReqHandler(pGVM, pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1861 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1862 break;
1863 }
1864
1865 /*
1866 * Requests to the internal networking service.
1867 */
1868 case VMMR0_DO_INTNET_OPEN:
1869 {
1870 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
1871 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
1872 return VERR_INVALID_PARAMETER;
1873 rc = IntNetR0OpenReq(pSession, pReq);
1874 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1875 break;
1876 }
1877
1878 case VMMR0_DO_INTNET_IF_CLOSE:
1879 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1880 return VERR_INVALID_PARAMETER;
1881 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
1882 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1883 break;
1884
1885
1886 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
1887 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1888 return VERR_INVALID_PARAMETER;
1889 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
1890 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1891 break;
1892
1893 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
1894 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1895 return VERR_INVALID_PARAMETER;
1896 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
1897 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1898 break;
1899
1900 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
1901 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1902 return VERR_INVALID_PARAMETER;
1903 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
1904 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1905 break;
1906
1907 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
1908 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1909 return VERR_INVALID_PARAMETER;
1910 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
1911 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1912 break;
1913
1914 case VMMR0_DO_INTNET_IF_SEND:
1915 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1916 return VERR_INVALID_PARAMETER;
1917 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
1918 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1919 break;
1920
1921 case VMMR0_DO_INTNET_IF_WAIT:
1922 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1923 return VERR_INVALID_PARAMETER;
1924 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
1925 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1926 break;
1927
1928 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
1929 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1930 return VERR_INVALID_PARAMETER;
1931 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
1932 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1933 break;
1934
1935#ifdef VBOX_WITH_PCI_PASSTHROUGH
1936 /*
1937 * Requests to host PCI driver service.
1938 */
1939 case VMMR0_DO_PCIRAW_REQ:
1940 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1941 return VERR_INVALID_PARAMETER;
1942 rc = PciRawR0ProcessReq(pGVM, pVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
1943 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1944 break;
1945#endif
1946 /*
1947 * For profiling.
1948 */
1949 case VMMR0_DO_NOP:
1950 case VMMR0_DO_SLOW_NOP:
1951 return VINF_SUCCESS;
1952
1953 /*
1954 * For testing Ring-0 APIs invoked in this environment.
1955 */
1956 case VMMR0_DO_TESTS:
1957 /** @todo make new test */
1958 return VINF_SUCCESS;
1959
1960
1961#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
1962 case VMMR0_DO_TEST_SWITCHER3264:
1963 if (idCpu == NIL_VMCPUID)
1964 return VERR_INVALID_CPU_ID;
1965 rc = HMR0TestSwitcher3264(pVM);
1966 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1967 break;
1968#endif
1969 default:
1970 /*
1971              * We're returning VERR_NOT_SUPPORTED here so we've got something other
1972              * than -1, which the interrupt gate glue code might return.
1973 */
1974 Log(("operation %#x is not supported\n", enmOperation));
1975 return VERR_NOT_SUPPORTED;
1976 }
1977 return rc;
1978}
1979
1980
1981/**
1982 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
1983 */
1984typedef struct VMMR0ENTRYEXARGS
1985{
1986 PGVM pGVM;
1987 PVM pVM;
1988 VMCPUID idCpu;
1989 VMMR0OPERATION enmOperation;
1990 PSUPVMMR0REQHDR pReq;
1991 uint64_t u64Arg;
1992 PSUPDRVSESSION pSession;
1993} VMMR0ENTRYEXARGS;
1994/** Pointer to a vmmR0EntryExWrapper argument package. */
1995typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
1996
1997/**
1998 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
1999 *
2000 * @returns VBox status code.
2001 * @param   pvArgs      The argument package.
2002 */
2003static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2004{
2005 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2006 ((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
2007 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2008 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2009 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2010 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2011 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2012}
2013
2014
2015/**
2016 * The Ring 0 entry point, called by the support library (SUP).
2017 *
2018 * @returns VBox status code.
2019 * @param pGVM The global (ring-0) VM structure.
2020 * @param pVM The cross context VM structure.
2021 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2022 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2023 * @param enmOperation Which operation to execute.
2024 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2025 * @param u64Arg Some simple constant argument.
2026 * @param pSession The session of the caller.
2027 * @remarks Assumes it is called with interrupts _enabled_.
2028 */
2029VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2030 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2031{
2032 /*
2033 * Requests that should only happen on the EMT thread will be
2034 * wrapped in a setjmp so we can assert without causing trouble.
2035 */
2036 if ( pVM != NULL
2037 && pGVM != NULL
2038 && idCpu < pGVM->cCpus
2039 && pVM->pVMR0 != NULL)
2040 {
2041 switch (enmOperation)
2042 {
2043 /* These might/will be called before VMMR3Init. */
2044 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2045 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2046 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2047 case VMMR0_DO_GMM_FREE_PAGES:
2048 case VMMR0_DO_GMM_BALLOONED_PAGES:
2049            /* On the Mac we might not have a valid jmp buf, so check these as well. */
2050 case VMMR0_DO_VMMR0_INIT:
2051 case VMMR0_DO_VMMR0_TERM:
2052 {
2053 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2054 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2055 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2056 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2057 && pVCpu->hNativeThreadR0 == hNativeThread))
2058 {
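                    /* No saved stack for the ring-3 call jump buffer yet; skip the setjmp
                       wrapping and fall through to the plain worker call at the bottom. */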
2059 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2060 break;
2061
2062 /** @todo validate this EMT claim... GVM knows. */
2063 VMMR0ENTRYEXARGS Args;
2064 Args.pGVM = pGVM;
2065 Args.pVM = pVM;
2066 Args.idCpu = idCpu;
2067 Args.enmOperation = enmOperation;
2068 Args.pReq = pReq;
2069 Args.u64Arg = u64Arg;
2070 Args.pSession = pSession;
2071 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2072 }
2073 return VERR_VM_THREAD_NOT_EMT;
2074 }
2075
2076 default:
2077 break;
2078 }
2079 }
2080 return vmmR0EntryExWorker(pGVM, pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2081}
2082
2083
2084/**
2085 * Checks whether we've armed the ring-0 long jump machinery.
2086 *
2087 * @returns @c true if the long-jump machinery is armed, @c false otherwise.
2088 * @param pVCpu The cross context virtual CPU structure.
2089 * @thread EMT
2090 * @sa VMMIsLongJumpArmed
2091 */
2092VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
2093{
2094#ifdef RT_ARCH_X86
2095 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2096 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2097#else
2098 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2099 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2100#endif
2101}
2102
2103
2104/**
2105 * Checks whether we've done a ring-3 long jump.
2106 *
2107 * @returns @c true if a ring-3 long jump is in progress, @c false otherwise.
2108 * @param pVCpu The cross context virtual CPU structure.
2109 * @thread EMT
2110 */
2111VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
2112{
2113 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2114}
2115
2116
2117/**
2118 * Internal R0 logger worker: Flush logger.
2119 *
2120 * @param pLogger The logger instance to flush.
2121 * @remark This function must be exported!
2122 */
2123VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2124{
2125#ifdef LOG_ENABLED
2126 /*
2127 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2128      * (This code is a bit paranoid.)
2129 */
2130 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2131 if ( !VALID_PTR(pR0Logger)
2132 || !VALID_PTR(pR0Logger + 1)
2133 || pLogger->u32Magic != RTLOGGER_MAGIC)
2134 {
2135# ifdef DEBUG
2136 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2137# endif
2138 return;
2139 }
2140 if (pR0Logger->fFlushingDisabled)
2141 return; /* quietly */
2142
2143 PVM pVM = pR0Logger->pVM;
2144 if ( !VALID_PTR(pVM)
2145 || pVM->pVMR0 != pVM)
2146 {
2147# ifdef DEBUG
2148 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
2149# endif
2150 return;
2151 }
2152
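    /* The flush itself is done in ring-3 (VMMCALLRING3_VMM_LOGGER_FLUSH), so this
       only works on an EMT whose jump buffer is armed. */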
2153 PVMCPU pVCpu = VMMGetCpu(pVM);
2154 if (pVCpu)
2155 {
2156 /*
2157 * Check that the jump buffer is armed.
2158 */
2159# ifdef RT_ARCH_X86
2160 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2161 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2162# else
2163 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2164 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2165# endif
2166 {
2167# ifdef DEBUG
2168 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2169# endif
2170 return;
2171 }
2172 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2173 }
2174# ifdef DEBUG
2175 else
2176 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2177# endif
2178#else
2179 NOREF(pLogger);
2180#endif /* LOG_ENABLED */
2181}
2182
2183/**
2184 * Internal R0 logger worker: Custom prefix.
2185 *
2186 * @returns Number of chars written.
2187 *
2188 * @param pLogger The logger instance.
2189 * @param pchBuf The output buffer.
2190 * @param cchBuf The size of the buffer.
2191 * @param pvUser User argument (ignored).
2192 */
2193VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
2194{
2195 NOREF(pvUser);
2196#ifdef LOG_ENABLED
2197 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2198 if ( !VALID_PTR(pR0Logger)
2199 || !VALID_PTR(pR0Logger + 1)
2200 || pLogger->u32Magic != RTLOGGER_MAGIC
2201 || cchBuf < 2)
2202 return 0;
2203
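    /* Emit the virtual CPU id as a two-digit hex prefix. */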
2204 static const char s_szHex[17] = "0123456789abcdef";
2205 VMCPUID const idCpu = pR0Logger->idCpu;
2206 pchBuf[1] = s_szHex[ idCpu & 15];
2207 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
2208
2209 return 2;
2210#else
2211 NOREF(pLogger); NOREF(pchBuf); NOREF(cchBuf);
2212 return 0;
2213#endif
2214}
2215
2216#ifdef LOG_ENABLED
2217
2218/**
2219 * Disables flushing of the ring-0 debug log.
2220 *
2221 * @param pVCpu The cross context virtual CPU structure.
2222 */
2223VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
2224{
2225 if (pVCpu->vmm.s.pR0LoggerR0)
2226 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2227}
2228
2229
2230/**
2231 * Enables flushing of the ring-0 debug log.
2232 *
2233 * @param pVCpu The cross context virtual CPU structure.
2234 */
2235VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
2236{
2237 if (pVCpu->vmm.s.pR0LoggerR0)
2238 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2239}
2240
2241
2242/**
2243 * Checks whether flushing of the ring-0 debug log is disabled.
2244 * @returns @c true if flushing is disabled or no ring-0 logger is set up, @c false otherwise.
2245 * @param pVCpu The cross context virtual CPU structure.
2246 */
2247VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
2248{
2249 if (pVCpu->vmm.s.pR0LoggerR0)
2250 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2251 return true;
2252}
2253#endif /* LOG_ENABLED */
2254
2255/**
2256 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2257 *
2258 * @returns true if the breakpoint should be hit, false if it should be ignored.
2259 */
2260DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2261{
2262#if 0
2263 return true;
2264#else
2265 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2266 if (pVM)
2267 {
2268 PVMCPU pVCpu = VMMGetCpu(pVM);
2269
2270 if (pVCpu)
2271 {
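            /* Only long-jump back to ring-3 when the jump buffer is armed and no ring-3
               call is already in progress (the same condition VMMR0IsLongJumpArmed checks). */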
2272#ifdef RT_ARCH_X86
2273 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2274 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2275#else
2276 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2277 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2278#endif
2279 {
2280 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2281 return RT_FAILURE_NP(rc);
2282 }
2283 }
2284 }
2285#ifdef RT_OS_LINUX
2286 return true;
2287#else
2288 return false;
2289#endif
2290#endif
2291}
2292
2293
2294/**
2295 * Override this so we can push it up to ring-3.
2296 *
2297 * @param pszExpr Expression. Can be NULL.
2298 * @param uLine Location line number.
2299 * @param pszFile Location file name.
2300 * @param pszFunction Location function name.
2301 */
2302DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2303{
2304 /*
2305 * To the log.
2306 */
2307 LogAlways(("\n!!R0-Assertion Failed!!\n"
2308 "Expression: %s\n"
2309 "Location : %s(%d) %s\n",
2310 pszExpr, pszFile, uLine, pszFunction));
2311
2312 /*
2313 * To the global VMM buffer.
2314 */
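    /* The expression is truncated to roughly 3/4 of the buffer, presumably so the
       location line still fits. */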
2315 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2316 if (pVM)
2317 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2318 "\n!!R0-Assertion Failed!!\n"
2319 "Expression: %.*s\n"
2320 "Location : %s(%d) %s\n",
2321 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2322 pszFile, uLine, pszFunction);
2323
2324 /*
2325 * Continue the normal way.
2326 */
2327 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2328}
2329
2330
2331/**
2332 * Callback for RTLogFormatV which writes to the ring-3 log port.
2333 * See PFNLOGOUTPUT() for details.
2334 */
2335static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2336{
2337 for (size_t i = 0; i < cbChars; i++)
2338 {
2339 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2340 }
2341
2342 NOREF(pv);
2343 return cbChars;
2344}
2345
2346
2347/**
2348 * Override this so we can push it up to ring-3.
2349 *
2350 * @param pszFormat The format string.
2351 * @param va Arguments.
2352 */
2353DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2354{
2355 va_list vaCopy;
2356
2357 /*
2358 * Push the message to the loggers.
2359 */
2360 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2361 if (pLog)
2362 {
2363 va_copy(vaCopy, va);
2364 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2365 va_end(vaCopy);
2366 }
2367 pLog = RTLogRelGetDefaultInstance();
2368 if (pLog)
2369 {
2370 va_copy(vaCopy, va);
2371 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2372 va_end(vaCopy);
2373 }
2374
2375 /*
2376 * Push it to the global VMM buffer.
2377 */
2378 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2379 if (pVM)
2380 {
2381 va_copy(vaCopy, va);
2382 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2383 va_end(vaCopy);
2384 }
2385
2386 /*
2387 * Continue the normal way.
2388 */
2389 RTAssertMsg2V(pszFormat, va);
2390}
2391