VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@68007

Last change on this file since 68007 was 68007, checked in by vboxsync, 7 years ago

VMMR0,GVMMR0: Adding GVM parameter to the calls.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 83.9 KB
1/* $Id: VMMR0.cpp 68007 2017-07-17 17:07:37Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/stam.h>
30#include <VBox/vmm/tm.h>
31#include "VMMInternal.h"
32#include <VBox/vmm/vm.h>
33#include <VBox/vmm/gvm.h>
34#ifdef VBOX_WITH_PCI_PASSTHROUGH
35# include <VBox/vmm/pdmpci.h>
36#endif
37#include <VBox/vmm/apic.h>
38
39#include <VBox/vmm/gvmm.h>
40#include <VBox/vmm/gmm.h>
41#include <VBox/vmm/gim.h>
42#include <VBox/intnet.h>
43#include <VBox/vmm/hm.h>
44#include <VBox/param.h>
45#include <VBox/err.h>
46#include <VBox/version.h>
47#include <VBox/log.h>
48
49#include <iprt/asm-amd64-x86.h>
50#include <iprt/assert.h>
51#include <iprt/crc.h>
52#include <iprt/mp.h>
53#include <iprt/once.h>
54#include <iprt/stdarg.h>
55#include <iprt/string.h>
56#include <iprt/thread.h>
57#include <iprt/timer.h>
58
59#include "dtrace/VBoxVMM.h"
60
61
62#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
63# pragma intrinsic(_AddressOfReturnAddress)
64#endif
65
66#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
67# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
68#endif
69
70
71
72/*********************************************************************************************************************************
73* Defined Constants And Macros *
74*********************************************************************************************************************************/
75/** @def VMM_CHECK_SMAP_SETUP
76 * SMAP check setup. */
77/** @def VMM_CHECK_SMAP_CHECK
78 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
79 * it will be logged and @a a_BadExpr is executed. */
80/** @def VMM_CHECK_SMAP_CHECK2
81 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
82 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
83 * executed. */
84#if defined(VBOX_STRICT) || 1
85# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
86# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
87 do { \
88 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
89 { \
90 RTCCUINTREG fEflCheck = ASMGetFlags(); \
91 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
92 { /* likely */ } \
93 else \
94 { \
95 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
96 a_BadExpr; \
97 } \
98 } \
99 } while (0)
100# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
101 do { \
102 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
103 { \
104 RTCCUINTREG fEflCheck = ASMGetFlags(); \
105 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
106 { /* likely */ } \
107 else \
108 { \
109 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
110 RTStrPrintf((a_pVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pVM)->vmm.s.szRing0AssertMsg1), \
111 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
112 a_BadExpr; \
113 } \
114 } \
115 } while (0)
116#else
117# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
118# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
119# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
120#endif
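
/*
 * Editorial illustration (not part of the original source): how the three
 * SMAP macros above are meant to be combined in a ring-0 entry point. This
 * mirrors the usage found later in this file (e.g. vmmR0InitVM); the worker
 * function here is hypothetical.
 */
#if 0 /* illustrative sketch only */
static int vmmR0SomeEntryPoint(PVM pVM)
{
    VMM_CHECK_SMAP_SETUP();                                     /* caches SUPR0GetKernelFeatures() */
    VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);    /* bail if EFLAGS.AC got cleared */

    int rc = vmmR0SomeWorker(pVM);                              /* hypothetical worker */

    /* The 2-variant additionally records the failure in the VM's ring-0 assertion buffer. */
    VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
    return rc;
}
#endif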
121
122
123/*********************************************************************************************************************************
124* Internal Functions *
125*********************************************************************************************************************************/
126RT_C_DECLS_BEGIN
127#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
128extern uint64_t __udivdi3(uint64_t, uint64_t);
129extern uint64_t __umoddi3(uint64_t, uint64_t);
130#endif
131RT_C_DECLS_END
132
133
134/*********************************************************************************************************************************
135* Global Variables *
136*********************************************************************************************************************************/
137/** Drag in necessary library bits.
138 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
139PFNRT g_VMMR0Deps[] =
140{
141 (PFNRT)RTCrc32,
142 (PFNRT)RTOnce,
143#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
144 (PFNRT)__udivdi3,
145 (PFNRT)__umoddi3,
146#endif
147 NULL
148};
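
/*
 * Editorial illustration: taking function addresses in a table the linker
 * cannot strip, as g_VMMR0Deps does above, is the standard way to force
 * symbols into an image so other modules can resolve them against it.
 * The same idiom with hypothetical names:
 *
 *     extern void SomeLibHelper(void);     // lives in a static library
 *     PFNRT g_SomeModuleDeps[] =           // referenced here => dragged in
 *     {
 *         (PFNRT)SomeLibHelper,
 *         NULL
 *     };
 */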
149
150#ifdef RT_OS_SOLARIS
151/* Dependency information for the native solaris loader. */
152extern "C" { char _depends_on[] = "vboxdrv"; }
153#endif
154
155
156
157/**
158 * Initialize the module.
159 * This is called when we're first loaded.
160 *
161 * @returns 0 on success.
162 * @returns VBox status on failure.
163 * @param hMod Image handle for use in APIs.
164 */
165DECLEXPORT(int) ModuleInit(void *hMod)
166{
167 VMM_CHECK_SMAP_SETUP();
168 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
169
170#ifdef VBOX_WITH_DTRACE_R0
171 /*
172 * The first thing to do is register the static tracepoints.
173 * (Deregistration is automatic.)
174 */
175 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
176 if (RT_FAILURE(rc2))
177 return rc2;
178#endif
179 LogFlow(("ModuleInit:\n"));
180
181#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
182 /*
183 * Display the CMOS debug code.
184 */
185 ASMOutU8(0x72, 0x03);
186 uint8_t bDebugCode = ASMInU8(0x73);
187 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
188 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
189#endif
190
191 /*
192 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
193 */
194 int rc = vmmInitFormatTypes();
195 if (RT_SUCCESS(rc))
196 {
197 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
198 rc = GVMMR0Init();
199 if (RT_SUCCESS(rc))
200 {
201 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
202 rc = GMMR0Init();
203 if (RT_SUCCESS(rc))
204 {
205 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
206 rc = HMR0Init();
207 if (RT_SUCCESS(rc))
208 {
209 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
210 rc = PGMRegisterStringFormatTypes();
211 if (RT_SUCCESS(rc))
212 {
213 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
214#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
215 rc = PGMR0DynMapInit();
216#endif
217 if (RT_SUCCESS(rc))
218 {
219 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
220 rc = IntNetR0Init();
221 if (RT_SUCCESS(rc))
222 {
223#ifdef VBOX_WITH_PCI_PASSTHROUGH
224 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
225 rc = PciRawR0Init();
226#endif
227 if (RT_SUCCESS(rc))
228 {
229 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
230 rc = CPUMR0ModuleInit();
231 if (RT_SUCCESS(rc))
232 {
233#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
234 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
235 rc = vmmR0TripleFaultHackInit();
236 if (RT_SUCCESS(rc))
237#endif
238 {
239 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
240 if (RT_SUCCESS(rc))
241 {
242 LogFlow(("ModuleInit: returns success.\n"));
243 return VINF_SUCCESS;
244 }
245 }
246
247 /*
248 * Bail out.
249 */
250#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
251 vmmR0TripleFaultHackTerm();
252#endif
253 }
254 else
255 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
256#ifdef VBOX_WITH_PCI_PASSTHROUGH
257 PciRawR0Term();
258#endif
259 }
260 else
261 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
262 IntNetR0Term();
263 }
264 else
265 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
266#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
267 PGMR0DynMapTerm();
268#endif
269 }
270 else
271 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
272 PGMDeregisterStringFormatTypes();
273 }
274 else
275 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
276 HMR0Term();
277 }
278 else
279 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
280 GMMR0Term();
281 }
282 else
283 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
284 GVMMR0Term();
285 }
286 else
287 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
288 vmmTermFormatTypes();
289 }
290 else
291 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
292
293 LogFlow(("ModuleInit: failed %Rrc\n", rc));
294 return rc;
295}
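
/*
 * Editorial note: the deep nesting in ModuleInit above implements
 * init-in-order / unwind-in-reverse without goto. The skeleton of the
 * pattern, with hypothetical subsystems A and B:
 *
 *     int rc = SubsysAInit();
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = SubsysBInit();
 *         if (RT_SUCCESS(rc))
 *             return VINF_SUCCESS;    // everything is up
 *         SubsysATerm();              // B failed: tear down A only
 *     }
 *     return rc;                      // first failure status
 */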
296
297
298/**
299 * Terminate the module.
300 * This is called when we're finally unloaded.
301 *
302 * @param hMod Image handle for use in APIs.
303 */
304DECLEXPORT(void) ModuleTerm(void *hMod)
305{
306 NOREF(hMod);
307 LogFlow(("ModuleTerm:\n"));
308
309 /*
310 * Terminate the CPUM module (Local APIC cleanup).
311 */
312 CPUMR0ModuleTerm();
313
314 /*
315 * Terminate the internal network service.
316 */
317 IntNetR0Term();
318
319 /*
320 * PGM (Darwin), HM and PciRaw global cleanup.
321 */
322#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
323 PGMR0DynMapTerm();
324#endif
325#ifdef VBOX_WITH_PCI_PASSTHROUGH
326 PciRawR0Term();
327#endif
328 PGMDeregisterStringFormatTypes();
329 HMR0Term();
330#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
331 vmmR0TripleFaultHackTerm();
332#endif
333
334 /*
335 * Destroy the GMM and GVMM instances.
336 */
337 GMMR0Term();
338 GVMMR0Term();
339
340 vmmTermFormatTypes();
341
342 LogFlow(("ModuleTerm: returns\n"));
343}
344
345
346/**
347 * Initiates the R0 driver for a particular VM instance.
348 *
349 * @returns VBox status code.
350 *
351 * @param pVM The cross context VM structure.
352 * @param uSvnRev The SVN revision of the ring-3 part.
353 * @param uBuildType Build type indicator.
354 * @thread EMT.
355 */
356static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
357{
358 VMM_CHECK_SMAP_SETUP();
359 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
360
361 /*
362 * Match the SVN revisions and build type.
363 */
364 if (uSvnRev != VMMGetSvnRev())
365 {
366 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
367 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
368 return VERR_VMM_R0_VERSION_MISMATCH;
369 }
370 if (uBuildType != vmmGetBuildType())
371 {
372 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
373 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
374 return VERR_VMM_R0_VERSION_MISMATCH;
375 }
376 if ( !VALID_PTR(pVM)
377 || pVM->pVMR0 != pVM)
378 return VERR_INVALID_PARAMETER;
379
380
381#ifdef LOG_ENABLED
382 /*
383 * Register the EMT R0 logger instance for VCPU 0.
384 */
385 PVMCPU pVCpu = &pVM->aCpus[0];
386
387 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
388 if (pR0Logger)
389 {
390# if 0 /* testing of the logger. */
391 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
392 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
393 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
394 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
395
396 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
397 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
398 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
399 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
400
401 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
402 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
403 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
404 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
405
406 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
407 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
408 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
409 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
410 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
411 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
412
413 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
414 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
415
416 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
417 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
418 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
419# endif
420 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
421 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
422 pR0Logger->fRegistered = true;
423 }
424#endif /* LOG_ENABLED */
425
426 /*
427 * Check if the host supports high resolution timers or not.
428 */
429 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
430 && !RTTimerCanDoHighResolution())
431 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
432
433 /*
434 * Initialize the per VM data for GVMM and GMM.
435 */
436 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
437 int rc = GVMMR0InitVM(pVM);
438// if (RT_SUCCESS(rc))
439// rc = GMMR0InitPerVMData(pVM);
440 if (RT_SUCCESS(rc))
441 {
442 /*
443 * Init HM, CPUM and PGM (Darwin only).
444 */
445 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
446 rc = HMR0InitVM(pVM);
447 if (RT_SUCCESS(rc))
448 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
449 if (RT_SUCCESS(rc))
450 {
451 rc = CPUMR0InitVM(pVM);
452 if (RT_SUCCESS(rc))
453 {
454 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
455#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
456 rc = PGMR0DynMapInitVM(pVM);
457#endif
458 if (RT_SUCCESS(rc))
459 {
460 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
461#ifdef VBOX_WITH_PCI_PASSTHROUGH
462 rc = PciRawR0InitVM(pVM);
463#endif
464 if (RT_SUCCESS(rc))
465 {
466 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
467 rc = GIMR0InitVM(pVM);
468 if (RT_SUCCESS(rc))
469 {
470 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
471 if (RT_SUCCESS(rc))
472 {
473 GVMMR0DoneInitVM(pVM);
474 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
475 return rc;
476 }
477
478 /* Bail out. */
479 GIMR0TermVM(pVM);
480 }
481#ifdef VBOX_WITH_PCI_PASSTHROUGH
482 PciRawR0TermVM(pVM);
483#endif
484 }
485 }
486 }
487 HMR0TermVM(pVM);
488 }
489 }
490
491 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
492 return rc;
493}
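
/*
 * Editorial note: the dispatcher further down (vmmR0EntryExWorker) unpacks
 * uSvnRev and uBuildType from the low and high dwords of u64Arg, so the
 * ring-3 side presumably packs them accordingly -- a sketch assuming the
 * SUPR3CallVMMR0Ex API from VBox/sup.h and IPRT's RT_MAKE_U64:
 *
 *     // idCpu = 0: this operation is performed on EMT(0).
 *     rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0, VMMR0_DO_VMMR0_INIT,
 *                           RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()), NULL);
 */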
494
495
496/**
497 * Terminates the R0 bits for a particular VM instance.
498 *
499 * This is normally called by ring-3 as part of the VM termination process, but
500 * may alternatively be called during the support driver session cleanup when
501 * the VM object is destroyed (see GVMM).
502 *
503 * @returns VBox status code.
504 *
505 * @param pVM The cross context VM structure.
506 * @param pGVM Pointer to the global VM structure. Optional.
507 * @thread EMT or session clean up thread.
508 */
509VMMR0_INT_DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
510{
511#ifdef VBOX_WITH_PCI_PASSTHROUGH
512 PciRawR0TermVM(pVM);
513#endif
514
515 /*
516 * Tell GVMM what we're up to and check that we only do this once.
517 */
518 if (GVMMR0DoingTermVM(pVM, pGVM))
519 {
520 GIMR0TermVM(pVM);
521
522 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
523 * here to make sure we don't leak any shared pages if we crash... */
524#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
525 PGMR0DynMapTermVM(pVM);
526#endif
527 HMR0TermVM(pVM);
528 }
529
530 /*
531 * Deregister the logger.
532 */
533 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
534 return VINF_SUCCESS;
535}
536
537
538/**
539 * VMM ring-0 thread-context callback.
540 *
541 * This does common HM state updating and calls the HM-specific thread-context
542 * callback.
543 *
544 * @param enmEvent The thread-context event.
545 * @param pvUser Opaque pointer to the VMCPU.
546 *
547 * @thread EMT(pvUser)
548 */
549static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
550{
551 PVMCPU pVCpu = (PVMCPU)pvUser;
552
553 switch (enmEvent)
554 {
555 case RTTHREADCTXEVENT_IN:
556 {
557 /*
558 * Linux may call us with preemption enabled (really!) but technically we
559 * cannot get preempted here, otherwise we end up in an infinite recursion
560 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
561 * ad infinitum). Let's just disable preemption for now...
562 */
563 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
564 * preemption after doing the callout (one or two functions up the
565 * call chain). */
566 /** @todo r=ramshankar: See @bugref{5313#c30}. */
567 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
568 RTThreadPreemptDisable(&ParanoidPreemptState);
569
570 /* We need to update the VCPU <-> host CPU mapping. */
571 RTCPUID idHostCpu;
572 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
573 pVCpu->iHostCpuSet = iHostCpuSet;
574 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
575
576 /* In the very unlikely event that the GIP delta for the CPU we're
577 rescheduled on needs calculating, try to force a return to ring-3.
578 We unfortunately cannot do the measurements right here. */
579 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
580 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
581
582 /* Invoke the HM-specific thread-context callback. */
583 HMR0ThreadCtxCallback(enmEvent, pvUser);
584
585 /* Restore preemption. */
586 RTThreadPreemptRestore(&ParanoidPreemptState);
587 break;
588 }
589
590 case RTTHREADCTXEVENT_OUT:
591 {
592 /* Invoke the HM-specific thread-context callback. */
593 HMR0ThreadCtxCallback(enmEvent, pvUser);
594
595 /*
596 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
597 * have the same host CPU associated with them.
598 */
599 pVCpu->iHostCpuSet = UINT32_MAX;
600 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
601 break;
602 }
603
604 default:
605 /* Invoke the HM-specific thread-context callback. */
606 HMR0ThreadCtxCallback(enmEvent, pvUser);
607 break;
608 }
609}
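
/*
 * Editorial note: the callback above is wired up per EMT with the IPRT
 * thread-context hook API; all four calls appear in the functions that
 * follow. Lifecycle sketch (error handling omitted):
 *
 *     RTTHREADCTXHOOK hHook;
 *     RTThreadCtxHookCreate(&hHook, 0, vmmR0ThreadCtxCallback, pVCpu); // on the EMT
 *     RTThreadCtxHookEnable(hHook);     // before entering HM context
 *     // ... guest execution; the callback fires on preempt/resume ...
 *     RTThreadCtxHookDisable(hHook);    // before going back to ring-3
 *     RTThreadCtxHookDestroy(hHook);    // at VCPU cleanup
 */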
610
611
612/**
613 * Creates thread switching hook for the current EMT thread.
614 *
615 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
616 * platform does not implement switcher hooks, no hooks will be created and the
617 * member is set to NIL_RTTHREADCTXHOOK.
618 *
619 * @returns VBox status code.
620 * @param pVCpu The cross context virtual CPU structure.
621 * @thread EMT(pVCpu)
622 */
623VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
624{
625 VMCPU_ASSERT_EMT(pVCpu);
626 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
627
628#if 1 /* To disable this stuff change to zero. */
629 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
630 if (RT_SUCCESS(rc))
631 return rc;
632#else
633 RT_NOREF(vmmR0ThreadCtxCallback);
634 int rc = VERR_NOT_SUPPORTED;
635#endif
636
637 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
638 if (rc == VERR_NOT_SUPPORTED)
639 return VINF_SUCCESS;
640
641 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
642 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
643}
644
645
646/**
647 * Destroys the thread switching hook for the specified VCPU.
648 *
649 * @param pVCpu The cross context virtual CPU structure.
650 * @remarks Can be called from any thread.
651 */
652VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
653{
654 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
655 AssertRC(rc);
656 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
657}
658
659
660/**
661 * Disables the thread switching hook for this VCPU (if we got one).
662 *
663 * @param pVCpu The cross context virtual CPU structure.
664 * @thread EMT(pVCpu)
665 *
666 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
667 * this call. This means you have to be careful with what you do!
668 */
669VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
670{
671 /*
672 * Clear the VCPU <-> host CPU mapping as we've left HM context.
673 * @bugref{7726#c19} explains the need for this trick:
674 *
675 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
676 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
677 * longjmp & normal return to ring-3, which opens a window where we may be
678 * rescheduled without changing VMCPU::idHostCpu and cause confusion if
679 * the CPU starts executing a different EMT. Both functions first disable
680 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
681 * an opening for getting preempted.
682 */
683 /** @todo Make HM not need this API! Then we could leave the hooks enabled
684 * all the time. */
685 /** @todo move this into the context hook disabling if(). */
686 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
687
688 /*
689 * Disable the context hook, if we got one.
690 */
691 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
692 {
693 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
694 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
695 AssertRC(rc);
696 }
697}
698
699
700/**
701 * Internal version of VMMR0ThreadCtxHookIsEnabled.
702 *
703 * @returns true if registered, false otherwise.
704 * @param pVCpu The cross context virtual CPU structure.
705 */
706DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
707{
708 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
709}
710
711
712/**
713 * Whether thread-context hooks are registered for this VCPU.
714 *
715 * @returns true if registered, false otherwise.
716 * @param pVCpu The cross context virtual CPU structure.
717 */
718VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
719{
720 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
721}
722
723
724#ifdef VBOX_WITH_STATISTICS
725/**
726 * Records return code statistics.
727 * @param pVM The cross context VM structure.
728 * @param pVCpu The cross context virtual CPU structure.
729 * @param rc The status code.
730 */
731static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
732{
733 /*
734 * Collect statistics.
735 */
736 switch (rc)
737 {
738 case VINF_SUCCESS:
739 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
740 break;
741 case VINF_EM_RAW_INTERRUPT:
742 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
743 break;
744 case VINF_EM_RAW_INTERRUPT_HYPER:
745 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
746 break;
747 case VINF_EM_RAW_GUEST_TRAP:
748 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
749 break;
750 case VINF_EM_RAW_RING_SWITCH:
751 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
752 break;
753 case VINF_EM_RAW_RING_SWITCH_INT:
754 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
755 break;
756 case VINF_EM_RAW_STALE_SELECTOR:
757 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
758 break;
759 case VINF_EM_RAW_IRET_TRAP:
760 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
761 break;
762 case VINF_IOM_R3_IOPORT_READ:
763 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
764 break;
765 case VINF_IOM_R3_IOPORT_WRITE:
766 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
767 break;
768 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
769 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
770 break;
771 case VINF_IOM_R3_MMIO_READ:
772 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
773 break;
774 case VINF_IOM_R3_MMIO_WRITE:
775 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
776 break;
777 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
778 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
779 break;
780 case VINF_IOM_R3_MMIO_READ_WRITE:
781 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
782 break;
783 case VINF_PATM_HC_MMIO_PATCH_READ:
784 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
785 break;
786 case VINF_PATM_HC_MMIO_PATCH_WRITE:
787 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
788 break;
789 case VINF_CPUM_R3_MSR_READ:
790 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
791 break;
792 case VINF_CPUM_R3_MSR_WRITE:
793 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
794 break;
795 case VINF_EM_RAW_EMULATE_INSTR:
796 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
797 break;
798 case VINF_EM_RAW_EMULATE_IO_BLOCK:
799 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
800 break;
801 case VINF_PATCH_EMULATE_INSTR:
802 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
803 break;
804 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
805 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
806 break;
807 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
808 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
809 break;
810 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
811 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
812 break;
813 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
814 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
815 break;
816 case VINF_CSAM_PENDING_ACTION:
817 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
818 break;
819 case VINF_PGM_SYNC_CR3:
820 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
821 break;
822 case VINF_PATM_PATCH_INT3:
823 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
824 break;
825 case VINF_PATM_PATCH_TRAP_PF:
826 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
827 break;
828 case VINF_PATM_PATCH_TRAP_GP:
829 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
830 break;
831 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
832 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
833 break;
834 case VINF_EM_RESCHEDULE_REM:
835 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
836 break;
837 case VINF_EM_RAW_TO_R3:
838 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
839 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
840 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
841 else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
842 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
843 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
844 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
845 else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
846 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
847 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
848 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
849 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
850 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
851 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
852 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
853 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
854 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
855 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
856 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
857 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
858 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
859 else
860 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
861 break;
862
863 case VINF_EM_RAW_TIMER_PENDING:
864 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
865 break;
866 case VINF_EM_RAW_INTERRUPT_PENDING:
867 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
868 break;
869 case VINF_VMM_CALL_HOST:
870 switch (pVCpu->vmm.s.enmCallRing3Operation)
871 {
872 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
873 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
874 break;
875 case VMMCALLRING3_PDM_LOCK:
876 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
877 break;
878 case VMMCALLRING3_PGM_POOL_GROW:
879 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
880 break;
881 case VMMCALLRING3_PGM_LOCK:
882 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
883 break;
884 case VMMCALLRING3_PGM_MAP_CHUNK:
885 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
886 break;
887 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
888 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
889 break;
890 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
891 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
892 break;
893 case VMMCALLRING3_VMM_LOGGER_FLUSH:
894 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
895 break;
896 case VMMCALLRING3_VM_SET_ERROR:
897 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
898 break;
899 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
900 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
901 break;
902 case VMMCALLRING3_VM_R0_ASSERTION:
903 default:
904 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
905 break;
906 }
907 break;
908 case VINF_PATM_DUPLICATE_FUNCTION:
909 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
910 break;
911 case VINF_PGM_CHANGE_MODE:
912 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
913 break;
914 case VINF_PGM_POOL_FLUSH_PENDING:
915 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
916 break;
917 case VINF_EM_PENDING_REQUEST:
918 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
919 break;
920 case VINF_EM_HM_PATCH_TPR_INSTR:
921 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
922 break;
923 default:
924 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
925 break;
926 }
927}
928#endif /* VBOX_WITH_STATISTICS */
929
930
931/**
932 * The Ring 0 entry point, called by the fast-ioctl path.
933 *
934 * @param pGVM The global (ring-0) VM structure.
935 * @param pVM The cross context VM structure.
936 * The return code is stored in pVM->vmm.s.iLastGZRc.
937 * @param idCpu The Virtual CPU ID of the calling EMT.
938 * @param enmOperation Which operation to execute.
939 * @remarks Assume called with interrupts _enabled_.
940 */
941VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
942{
943 /*
944 * Validation.
945 */
946 if ( idCpu < pGVM->cCpus
947 && pGVM->cCpus == pVM->cCpus)
948 { /*likely*/ }
949 else
950 {
951 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x/%#x\n", idCpu, pGVM->cCpus, pVM->cCpus);
952 return;
953 }
954
955 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
956 PVMCPU pVCpu = &pVM->aCpus[idCpu];
957 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
958 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
959 && pVCpu->hNativeThreadR0 == hNativeThread))
960 { /* likely */ }
961 else
962 {
963 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pVCpu->hNativeThreadR0=%p\n",
964 idCpu, hNativeThread, pGVCpu->hEMT, pVCpu->hNativeThreadR0);
965 return;
966 }
967
968 /*
969 * SMAP fun.
970 */
971 VMM_CHECK_SMAP_SETUP();
972 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
973
974 /*
975 * Perform requested operation.
976 */
977 switch (enmOperation)
978 {
979 /*
980 * Switch to GC and run guest raw mode code.
981 * Disable interrupts before doing the world switch.
982 */
983 case VMMR0_DO_RAW_RUN:
984 {
985#ifdef VBOX_WITH_RAW_MODE
986# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
987 /* Some safety precautions first. */
988 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
989 {
990 pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
991 break;
992 }
993# endif
994
995 /*
996 * Disable preemption.
997 */
998 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
999 RTThreadPreemptDisable(&PreemptState);
1000
1001 /*
1002 * Get the host CPU identifiers, make sure they are valid and that
1003 * we've got a TSC delta for the CPU.
1004 */
1005 RTCPUID idHostCpu;
1006 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1007 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1008 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1009 {
1010 /*
1011 * Commit the CPU identifiers and update the periodic preemption timer if it's active.
1012 */
1013# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1014 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1015# endif
1016 pVCpu->iHostCpuSet = iHostCpuSet;
1017 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1018
1019 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1020 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1021
1022 /*
1023 * We might need to disable VT-x if the active switcher turns off paging.
1024 */
1025 bool fVTxDisabled;
1026 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1027 if (RT_SUCCESS(rc))
1028 {
1029 /*
1030 * Disable interrupts and run raw-mode code. The loop is for efficiently
1031 * dispatching tracepoints that fired in raw-mode context.
1032 */
1033 RTCCUINTREG uFlags = ASMIntDisableFlags();
1034
1035 for (;;)
1036 {
1037 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
1038 TMNotifyStartOfExecution(pVCpu);
1039
1040 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1041 pVCpu->vmm.s.iLastGZRc = rc;
1042
1043 TMNotifyEndOfExecution(pVCpu);
1044 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1045
1046 if (rc != VINF_VMM_CALL_TRACER)
1047 break;
1048 SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
1049 }
1050
1051 /*
1052 * Re-enable VT-x before we dispatch any pending host interrupts and
1053 * re-enable interrupts.
1054 */
1055 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1056
1057 if ( rc == VINF_EM_RAW_INTERRUPT
1058 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1059 TRPMR0DispatchHostInterrupt(pVM);
1060
1061 ASMSetFlags(uFlags);
1062
1063 /* Fire dtrace probe and collect statistics. */
1064 VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1065# ifdef VBOX_WITH_STATISTICS
1066 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1067 vmmR0RecordRC(pVM, pVCpu, rc);
1068# endif
1069 }
1070 else
1071 pVCpu->vmm.s.iLastGZRc = rc;
1072
1073 /*
1074 * Invalidate the host CPU identifiers as we restore preemption.
1075 */
1076 pVCpu->iHostCpuSet = UINT32_MAX;
1077 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1078
1079 RTThreadPreemptRestore(&PreemptState);
1080 }
1081 /*
1082 * Invalid CPU set index or TSC delta in need of measuring.
1083 */
1084 else
1085 {
1086 RTThreadPreemptRestore(&PreemptState);
1087 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1088 {
1089 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1090 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1091 0 /*default cTries*/);
1092 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1093 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1094 else
1095 pVCpu->vmm.s.iLastGZRc = rc;
1096 }
1097 else
1098 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1099 }
1100
1101#else /* !VBOX_WITH_RAW_MODE */
1102 pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_NOT_SUPPORTED;
1103#endif
1104 break;
1105 }
1106
1107 /*
1108 * Run guest code using the available hardware acceleration technology.
1109 */
1110 case VMMR0_DO_HM_RUN:
1111 {
1112 /*
1113 * Disable preemption.
1114 */
1115 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1116 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1117 RTThreadPreemptDisable(&PreemptState);
1118
1119 /*
1120 * Get the host CPU identifiers, make sure they are valid and that
1121 * we've got a TSC delta for the CPU.
1122 */
1123 RTCPUID idHostCpu;
1124 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1125 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1126 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1127 {
1128 pVCpu->iHostCpuSet = iHostCpuSet;
1129 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1130
1131 /*
1132 * Update the periodic preemption timer if it's active.
1133 */
1134 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1135 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1136 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1137
1138#ifdef LOG_ENABLED
1139 /*
1140 * Ugly: Lazy registration of ring 0 loggers.
1141 */
1142 if (pVCpu->idCpu > 0)
1143 {
1144 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
1145 if ( pR0Logger
1146 && RT_UNLIKELY(!pR0Logger->fRegistered))
1147 {
1148 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
1149 pR0Logger->fRegistered = true;
1150 }
1151 }
1152#endif
1153
1154#ifdef VMM_R0_TOUCH_FPU
1155 /*
1156 * Make sure we've got the FPU state loaded so we don't need to clear
1157 * CR0.TS and get out of sync with the host kernel when loading the guest
1158 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1159 */
1160 CPUMR0TouchHostFpu();
1161#endif
1162 int rc;
1163 bool fPreemptRestored = false;
1164 if (!HMR0SuspendPending())
1165 {
1166 /*
1167 * Enable the context switching hook.
1168 */
1169 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1170 {
1171 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1172 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1173 }
1174
1175 /*
1176 * Enter HM context.
1177 */
1178 rc = HMR0Enter(pVM, pVCpu);
1179 if (RT_SUCCESS(rc))
1180 {
1181 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1182
1183 /*
1184 * When preemption hooks are in place, enable preemption now that
1185 * we're in HM context.
1186 */
1187 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1188 {
1189 fPreemptRestored = true;
1190 RTThreadPreemptRestore(&PreemptState);
1191 }
1192
1193 /*
1194 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1195 */
1196 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1197 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1198 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1199
1200 /*
1201 * Assert sanity on the way out. Using manual assertion code here as normal
1202 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1203 */
1204 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1205 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1206 {
1207 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1208 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1209 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1210 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1211 }
1212 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1213 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1214 {
1215 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1216 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1217 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1218 rc = VERR_INVALID_STATE;
1219 }
1220
1221 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1222 }
1223 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1224
1225 /*
1226 * Invalidate the host CPU identifiers before we disable the context
1227 * hook / restore preemption.
1228 */
1229 pVCpu->iHostCpuSet = UINT32_MAX;
1230 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1231
1232 /*
1233 * Disable context hooks. Due to unresolved cleanup issues, we
1234 * cannot leave the hooks enabled when we return to ring-3.
1235 *
1236 * Note! At the moment HM may also have disabled the hook
1237 * when we get here, but the IPRT API handles that.
1238 */
1239 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1240 {
1241 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1242 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1243 }
1244 }
1245 /*
1246 * The system is about to go into suspend mode; go back to ring 3.
1247 */
1248 else
1249 {
1250 rc = VINF_EM_RAW_INTERRUPT;
1251 pVCpu->iHostCpuSet = UINT32_MAX;
1252 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1253 }
1254
1255 /** @todo When HM stops messing with the context hook state, we'll disable
1256 * preemption again before the RTThreadCtxHookDisable call. */
1257 if (!fPreemptRestored)
1258 RTThreadPreemptRestore(&PreemptState);
1259
1260 pVCpu->vmm.s.iLastGZRc = rc;
1261
1262 /* Fire dtrace probe and collect statistics. */
1263 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1264#ifdef VBOX_WITH_STATISTICS
1265 vmmR0RecordRC(pVM, pVCpu, rc);
1266#endif
1267 }
1268 /*
1269 * Invalid CPU set index or TSC delta in need of measuring.
1270 */
1271 else
1272 {
1273 pVCpu->iHostCpuSet = UINT32_MAX;
1274 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1275 RTThreadPreemptRestore(&PreemptState);
1276 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1277 {
1278 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1279 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1280 0 /*default cTries*/);
1281 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1282 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1283 else
1284 pVCpu->vmm.s.iLastGZRc = rc;
1285 }
1286 else
1287 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1288 }
1289 break;
1290 }
1291
1292 /*
1293 * For profiling.
1294 */
1295 case VMMR0_DO_NOP:
1296 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1297 break;
1298
1299 /*
1300 * Impossible.
1301 */
1302 default:
1303 AssertMsgFailed(("%#x\n", enmOperation));
1304 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1305 break;
1306 }
1307 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1308}
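
/*
 * Editorial note: ring-3 reaches VMMR0EntryFast through the support driver's
 * fast ioctl path and reads the real status out of the VM structure
 * afterwards, since the entry point itself returns nothing. A sketch of the
 * calling side, assuming the SUPR3CallVMMR0Fast API from VBox/sup.h:
 *
 *     int rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HM_RUN, idCpu);
 *     if (RT_SUCCESS(rc))
 *         rc = pVM->vmm.s.iLastGZRc;   // stored by VMMR0EntryFast (see above)
 */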
1309
1310
1311/**
1312 * Validates a session or VM session argument.
1313 *
1314 * @returns true / false accordingly.
1315 * @param pVM The cross context VM structure.
1316 * @param pClaimedSession The session claim to validate.
1317 * @param pSession The session argument.
1318 */
1319DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1320{
1321 /* This must be set! */
1322 if (!pSession)
1323 return false;
1324
1325 /* Only one out of the two. */
1326 if (pVM && pClaimedSession)
1327 return false;
1328 if (pVM)
1329 pClaimedSession = pVM->pSession;
1330 return pClaimedSession == pSession;
1331}
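
/*
 * Editorial note: the accepted input combinations of the check above,
 * spelled out (pSession must always be non-NULL):
 *
 *     pVM     pClaimedSession    result
 *     NULL    NULL               false (nothing to match pSession against)
 *     NULL    valid              true iff pClaimedSession == pSession
 *     valid   NULL               true iff pVM->pSession == pSession
 *     valid   valid              false (only one of the two may be given)
 */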
1332
1333
1334/**
1335 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1336 * called thru a longjmp so we can exit safely on failure.
1337 *
1338 * @returns VBox status code.
1339 * @param pGVM The global (ring-0) VM structure.
1340 * @param pVM The cross context VM structure.
1341 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1343 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1343 * @param enmOperation Which operation to execute.
1344 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1345 * The support driver validates this if it's present.
1346 * @param u64Arg Some simple constant argument.
1347 * @param pSession The session of the caller.
1348 *
1349 * @remarks Assume called with interrupts _enabled_.
1350 */
1351static int vmmR0EntryExWorker(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1352 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1353{
1354 /*
1355 * Validate pGVM, pVM and idCpu for consistency and validity.
1356 */
1357 if ( pGVM != NULL
1358 || pVM != NULL)
1359 {
1360 if (RT_LIKELY( RT_VALID_PTR(pGVM)
1361 && RT_VALID_PTR(pVM)
1362 && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0))
1363 { /* likely */ }
1364 else
1365 {
1366 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p and/or pVM=%p! (op=%d)\n", pGVM, pVM, enmOperation);
1367 return VERR_INVALID_POINTER;
1368 }
1369
1370 if (RT_LIKELY(pGVM->pVM == pVM))
1371 { /* likely */ }
1372 else
1373 {
1374 SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM->pVM=%p\n", pVM, pGVM->pVM);
1375 return VERR_INVALID_PARAMETER;
1376 }
1377
1378 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1379 { /* likely */ }
1380 else
1381 {
1382 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1383 return VERR_INVALID_PARAMETER;
1384 }
1385
1386 if (RT_LIKELY( pVM->enmVMState >= VMSTATE_CREATING
1387 && pVM->enmVMState <= VMSTATE_TERMINATED
1388 && pVM->cCpus == pGVM->cCpus
1389 && pVM->pSession == pSession
1390 && pVM->pVMR0 == pVM))
1391 { /* likely */ }
1392 else
1393 {
1394 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{.enmVMState=%d, .cCpus=%#x(==%#x), .pSession=%p(==%p), .pVMR0=%p(==%p)}! (op=%d)\n",
1395 pVM, pVM->enmVMState, pVM->cCpus, pGVM->cCpus, pVM->pSession, pSession, pVM->pVMR0, pVM, enmOperation);
1396 return VERR_INVALID_POINTER;
1397 }
1398 }
1399 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1400 { /* likely */ }
1401 else
1402 {
1403 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1404 return VERR_INVALID_PARAMETER;
1405 }
1406
1407 /*
1408 * SMAP fun.
1409 */
1410 VMM_CHECK_SMAP_SETUP();
1411 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1412
1413 /*
1414 * Process the request.
1415 */
1416 int rc;
1417 switch (enmOperation)
1418 {
1419 /*
1420 * GVM requests
1421 */
1422 case VMMR0_DO_GVMM_CREATE_VM:
1423 if (pGVM == NULL && pVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1424 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1425 else
1426 rc = VERR_INVALID_PARAMETER;
1427 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1428 break;
1429
1430 case VMMR0_DO_GVMM_DESTROY_VM:
1431 if (pReqHdr == NULL && u64Arg == 0)
1432 rc = GVMMR0DestroyVM(pGVM, pVM);
1433 else
1434 rc = VERR_INVALID_PARAMETER;
1435 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1436 break;
1437
1438 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1439 if (pGVM != NULL && pVM != NULL)
1440 rc = GVMMR0RegisterVCpu(pGVM, pVM, idCpu);
1441 else
1442 rc = VERR_INVALID_PARAMETER;
1443 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1444 break;
1445
1446 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1447 if (pGVM != NULL && pVM != NULL)
1448 rc = GVMMR0DeregisterVCpu(pGVM, pVM, idCpu);
1449 else
1450 rc = VERR_INVALID_PARAMETER;
1451 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1452 break;
1453
1454 case VMMR0_DO_GVMM_SCHED_HALT:
1455 if (pReqHdr)
1456 return VERR_INVALID_PARAMETER;
1457 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1458 rc = GVMMR0SchedHalt(pGVM, pVM, idCpu, u64Arg);
1459 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1460 break;
1461
1462 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1463 if (pReqHdr || u64Arg)
1464 return VERR_INVALID_PARAMETER;
1465 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1466 rc = GVMMR0SchedWakeUp(pGVM, pVM, idCpu);
1467 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1468 break;
1469
1470 case VMMR0_DO_GVMM_SCHED_POKE:
1471 if (pReqHdr || u64Arg)
1472 return VERR_INVALID_PARAMETER;
1473 rc = GVMMR0SchedPoke(pGVM, pVM, idCpu);
1474 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1475 break;
1476
1477 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1478 if (u64Arg)
1479 return VERR_INVALID_PARAMETER;
1480 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1481 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1482 break;
1483
1484 case VMMR0_DO_GVMM_SCHED_POLL:
1485 if (pReqHdr || u64Arg > 1)
1486 return VERR_INVALID_PARAMETER;
1487 rc = GVMMR0SchedPoll(pGVM, pVM, idCpu, !!u64Arg);
1488 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1489 break;
1490
1491 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1492 if (u64Arg)
1493 return VERR_INVALID_PARAMETER;
1494 rc = GVMMR0QueryStatisticsReq(pGVM, pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1495 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1496 break;
1497
1498 case VMMR0_DO_GVMM_RESET_STATISTICS:
1499 if (u64Arg)
1500 return VERR_INVALID_PARAMETER;
1501 rc = GVMMR0ResetStatisticsReq(pGVM, pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1502 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1503 break;
1504
1505 /*
1506 * Initialize the R0 part of a VM instance.
1507 */
1508 case VMMR0_DO_VMMR0_INIT:
1509 rc = vmmR0InitVM(pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1510 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1511 break;
1512
1513 /*
1514 * Terminate the R0 part of a VM instance.
1515 */
1516 case VMMR0_DO_VMMR0_TERM:
1517 rc = VMMR0TermVM(pVM, NULL);
1518 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1519 break;
1520
1521 /*
1522 * Attempt to enable HM mode and check the current setting.
1523 */
1524 case VMMR0_DO_HM_ENABLE:
1525 rc = HMR0EnableAllCpus(pVM);
1526 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1527 break;
1528
1529 /*
1530 * Setup the hardware accelerated session.
1531 */
1532 case VMMR0_DO_HM_SETUP_VM:
1533 rc = HMR0SetupVM(pVM);
1534 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1535 break;
1536
1537 /*
1538 * Switch to RC to execute Hypervisor function.
1539 */
1540 case VMMR0_DO_CALL_HYPERVISOR:
1541 {
1542#ifdef VBOX_WITH_RAW_MODE
1543 /*
1544 * Validate input / context.
1545 */
1546 if (RT_UNLIKELY(idCpu != 0))
1547 return VERR_INVALID_CPU_ID;
1548 if (RT_UNLIKELY(pVM->cCpus != 1))
1549 return VERR_INVALID_PARAMETER;
1550 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1551# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1552 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1553 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1554# endif
1555
1556 /*
1557 * Disable interrupts.
1558 */
1559 RTCCUINTREG fFlags = ASMIntDisableFlags();
1560
1561 /*
1562 * Get the host CPU identifiers, make sure they are valid and that
1563 * we've got a TSC delta for the CPU.
1564 */
1565 RTCPUID idHostCpu;
1566 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1567 if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
1568 {
1569 ASMSetFlags(fFlags);
1570 return VERR_INVALID_CPU_INDEX;
1571 }
1572 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1573 {
1574 ASMSetFlags(fFlags);
1575 rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1576 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1577 0 /*default cTries*/);
1578 if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
1579 {
1580 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1581 return rc;
1582 }
1583 }
1584
1585 /*
1586 * Commit the CPU identifiers.
1587 */
1588# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1589 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1590# endif
1591 pVCpu->iHostCpuSet = iHostCpuSet;
1592 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1593
1594 /*
1595 * We might need to disable VT-x if the active switcher turns off paging.
1596 */
1597 bool fVTxDisabled;
1598 rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1599 if (RT_SUCCESS(rc))
1600 {
1601 /*
1602 * Go through the wormhole...
1603 */
1604 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1605
1606 /*
1607 * Re-enable VT-x before we dispatch any pending host interrupts.
1608 */
1609 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1610
1611 if ( rc == VINF_EM_RAW_INTERRUPT
1612 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1613 TRPMR0DispatchHostInterrupt(pVM);
1614 }
1615
1616 /*
1617 * Invalidate the host CPU identifiers as we restore interrupts.
1618 */
1619 pVCpu->iHostCpuSet = UINT32_MAX;
1620 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1621 ASMSetFlags(fFlags);
1622
1623#else /* !VBOX_WITH_RAW_MODE */
1624 rc = VERR_RAW_MODE_NOT_SUPPORTED;
1625#endif
1626 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1627 break;
1628 }
1629
1630 /*
1631 * PGM wrappers.
1632 */
1633 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1634 if (idCpu == NIL_VMCPUID)
1635 return VERR_INVALID_CPU_ID;
1636 rc = PGMR0PhysAllocateHandyPages(pGVM, pVM, idCpu);
1637 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1638 break;
1639
1640 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1641 if (idCpu == NIL_VMCPUID)
1642 return VERR_INVALID_CPU_ID;
1643 rc = PGMR0PhysFlushHandyPages(pGVM, pVM, idCpu);
1644 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1645 break;
1646
1647 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1648 if (idCpu == NIL_VMCPUID)
1649 return VERR_INVALID_CPU_ID;
1650 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, pVM, idCpu);
1651 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1652 break;
1653
1654 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1655 if (idCpu != 0)
1656 return VERR_INVALID_CPU_ID;
1657 rc = PGMR0PhysSetupIoMmu(pGVM, pVM);
1658 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1659 break;
1660
1661 /*
1662 * GMM wrappers.
1663 */
1664 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1665 if (u64Arg)
1666 return VERR_INVALID_PARAMETER;
1667 rc = GMMR0InitialReservationReq(pGVM, pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1668 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1669 break;
1670
1671 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1672 if (u64Arg)
1673 return VERR_INVALID_PARAMETER;
1674 rc = GMMR0UpdateReservationReq(pGVM, pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1675 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1676 break;
1677
1678 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1679 if (u64Arg)
1680 return VERR_INVALID_PARAMETER;
1681 rc = GMMR0AllocatePagesReq(pGVM, pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1682 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1683 break;
1684
1685 case VMMR0_DO_GMM_FREE_PAGES:
1686 if (u64Arg)
1687 return VERR_INVALID_PARAMETER;
1688 rc = GMMR0FreePagesReq(pGVM, pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1689 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1690 break;
1691
1692 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1693 if (u64Arg)
1694 return VERR_INVALID_PARAMETER;
1695 rc = GMMR0FreeLargePageReq(pGVM, pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1696 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1697 break;
1698
1699 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1700 if (u64Arg)
1701 return VERR_INVALID_PARAMETER;
1702 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1703 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1704 break;
1705
1706 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1707 if (idCpu == NIL_VMCPUID)
1708 return VERR_INVALID_CPU_ID;
1709 if (u64Arg)
1710 return VERR_INVALID_PARAMETER;
1711 rc = GMMR0QueryMemoryStatsReq(pGVM, pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1712 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1713 break;
1714
1715 case VMMR0_DO_GMM_BALLOONED_PAGES:
1716 if (u64Arg)
1717 return VERR_INVALID_PARAMETER;
1718 rc = GMMR0BalloonedPagesReq(pGVM, pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1719 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1720 break;
1721
1722 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1723 if (u64Arg)
1724 return VERR_INVALID_PARAMETER;
1725 rc = GMMR0MapUnmapChunkReq(pGVM, pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1726 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1727 break;
1728
1729 case VMMR0_DO_GMM_SEED_CHUNK:
1730 if (pReqHdr)
1731 return VERR_INVALID_PARAMETER;
1732 rc = GMMR0SeedChunk(pGVM, pVM, idCpu, (RTR3PTR)u64Arg);
1733 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1734 break;
1735
1736 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1737 if (idCpu == NIL_VMCPUID)
1738 return VERR_INVALID_CPU_ID;
1739 if (u64Arg)
1740 return VERR_INVALID_PARAMETER;
1741 rc = GMMR0RegisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1742 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1743 break;
1744
1745 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1746 if (idCpu == NIL_VMCPUID)
1747 return VERR_INVALID_CPU_ID;
1748 if (u64Arg)
1749 return VERR_INVALID_PARAMETER;
1750 rc = GMMR0UnregisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1751 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1752 break;
1753
1754 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1755 if (idCpu == NIL_VMCPUID)
1756 return VERR_INVALID_CPU_ID;
1757 if ( u64Arg
1758 || pReqHdr)
1759 return VERR_INVALID_PARAMETER;
1760 rc = GMMR0ResetSharedModules(pGVM, pVM, idCpu);
1761 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1762 break;
1763
1764#ifdef VBOX_WITH_PAGE_SHARING
1765 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1766 {
1767 if (idCpu == NIL_VMCPUID)
1768 return VERR_INVALID_CPU_ID;
1769 if ( u64Arg
1770 || pReqHdr)
1771 return VERR_INVALID_PARAMETER;
1772 rc = GMMR0CheckSharedModules(pGVM, pVM, idCpu);
1773 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1774 break;
1775 }
1776#endif
1777
1778#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1779 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1780 if (u64Arg)
1781 return VERR_INVALID_PARAMETER;
1782 rc = GMMR0FindDuplicatePageReq(pGVM, pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1783 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1784 break;
1785#endif
1786
1787 case VMMR0_DO_GMM_QUERY_STATISTICS:
1788 if (u64Arg)
1789 return VERR_INVALID_PARAMETER;
1790 rc = GMMR0QueryStatisticsReq(pGVM, pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1791 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1792 break;
1793
1794 case VMMR0_DO_GMM_RESET_STATISTICS:
1795 if (u64Arg)
1796 return VERR_INVALID_PARAMETER;
1797 rc = GMMR0ResetStatisticsReq(pGVM, pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1798 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1799 break;
1800
1801 /*
1802 * A quick GCFGM mock-up.
1803 */
1804 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1805 case VMMR0_DO_GCFGM_SET_VALUE:
1806 case VMMR0_DO_GCFGM_QUERY_VALUE:
1807 {
1808 if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1809 return VERR_INVALID_PARAMETER;
1810 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1811 if (pReq->Hdr.cbReq != sizeof(*pReq))
1812 return VERR_INVALID_PARAMETER;
1813 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1814 {
1815 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1816 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1817 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1818 }
1819 else
1820 {
1821 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1822 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1823 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1824 }
1825 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1826 break;
1827 }
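        /*
         * Illustrative only: a hedged sketch of what a set-value request might
         * look like from ring-3, using the fields referenced above
         * ("/GVMM/MinSleep" and the value are just example inputs):
         *
         * @code
         *      GCFGMVALUEREQ Req;
         *      Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
         *      Req.Hdr.cbReq    = sizeof(Req);
         *      Req.pSession     = pSession;
         *      Req.u64Value     = 1000;
         *      RTStrCopy(Req.szName, sizeof(Req.szName), "/GVMM/MinSleep");
         *      int rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID,
         *                                VMMR0_DO_GCFGM_SET_VALUE, 0, &Req.Hdr);
         * @endcode
         */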
1828
1829 /*
1830 * PDM Wrappers.
1831 */
1832 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1833 {
1834 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1835 return VERR_INVALID_PARAMETER;
1836 rc = PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1837 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1838 break;
1839 }
1840
1841 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1842 {
1843 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1844 return VERR_INVALID_PARAMETER;
1845 rc = PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1846 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1847 break;
1848 }
1849
1850 /*
1851 * Requests to the internal networking service.
1852 */
1853 case VMMR0_DO_INTNET_OPEN:
1854 {
1855 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
1856 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
1857 return VERR_INVALID_PARAMETER;
1858 rc = IntNetR0OpenReq(pSession, pReq);
1859 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1860 break;
1861 }
1862
1863 case VMMR0_DO_INTNET_IF_CLOSE:
1864 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1865 return VERR_INVALID_PARAMETER;
1866 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
1867 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1868 break;
1869
1870
1871 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
1872 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1873 return VERR_INVALID_PARAMETER;
1874 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
1875 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1876 break;
1877
1878 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
1879 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1880 return VERR_INVALID_PARAMETER;
1881 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
1882 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1883 break;
1884
1885 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
1886 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1887 return VERR_INVALID_PARAMETER;
1888 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
1889 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1890 break;
1891
1892 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
1893 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1894 return VERR_INVALID_PARAMETER;
1895 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
1896 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1897 break;
1898
1899 case VMMR0_DO_INTNET_IF_SEND:
1900 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1901 return VERR_INVALID_PARAMETER;
1902 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
1903 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1904 break;
1905
1906 case VMMR0_DO_INTNET_IF_WAIT:
1907 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1908 return VERR_INVALID_PARAMETER;
1909 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
1910 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1911 break;
1912
1913 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
1914 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1915 return VERR_INVALID_PARAMETER;
1916 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
1917 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1918 break;
1919
1920#ifdef VBOX_WITH_PCI_PASSTHROUGH
1921 /*
1922 * Requests to host PCI driver service.
1923 */
1924 case VMMR0_DO_PCIRAW_REQ:
1925 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1926 return VERR_INVALID_PARAMETER;
1927 rc = PciRawR0ProcessReq(pSession, pVM, (PPCIRAWSENDREQ)pReqHdr);
1928 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1929 break;
1930#endif
1931 /*
1932 * For profiling.
1933 */
1934 case VMMR0_DO_NOP:
1935 case VMMR0_DO_SLOW_NOP:
1936 return VINF_SUCCESS;
1937
1938 /*
1939 * For testing Ring-0 APIs invoked in this environment.
1940 */
1941 case VMMR0_DO_TESTS:
1942 /** @todo make new test */
1943 return VINF_SUCCESS;
1944
1945
1946#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
1947 case VMMR0_DO_TEST_SWITCHER3264:
1948 if (idCpu == NIL_VMCPUID)
1949 return VERR_INVALID_CPU_ID;
1950 rc = HMR0TestSwitcher3264(pVM);
1951 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1952 break;
1953#endif
1954 default:
1955 /*
1956 * We're returning VERR_NOT_SUPPORTED here so we've got something other
1957 * than -1 which the interrupt gate glue code might return.
1958 */
1959 Log(("operation %#x is not supported\n", enmOperation));
1960 return VERR_NOT_SUPPORTED;
1961 }
1962 return rc;
1963}
1964
1965
1966/**
1967 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
1968 */
1969typedef struct VMMR0ENTRYEXARGS
1970{
1971 PGVM pGVM;
1972 PVM pVM;
1973 VMCPUID idCpu;
1974 VMMR0OPERATION enmOperation;
1975 PSUPVMMR0REQHDR pReq;
1976 uint64_t u64Arg;
1977 PSUPDRVSESSION pSession;
1978} VMMR0ENTRYEXARGS;
1979/** Pointer to a vmmR0EntryExWrapper argument package. */
1980typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
1981
1982/**
1983 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
1984 *
1985 * @returns VBox status code.
1986 * @param pvArgs The argument package.
1987 */
1988static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
1989{
1990 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
1991 ((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
1992 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
1993 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
1994 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
1995 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
1996 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
1997}
1998
1999
2000/**
2001 * The Ring 0 entry point, called by the support library (SUP).
2002 *
2003 * @returns VBox status code.
2004 * @param pGVM The global (ring-0) VM structure.
2005 * @param pVM The cross context VM structure.
2006 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2007 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2008 * @param enmOperation Which operation to execute.
2009 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2010 * @param u64Arg Some simple constant argument.
2011 * @param pSession The session of the caller.
2012 * @remarks Assume called with interrupts _enabled_.
2013 */
2014VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2015 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2016{
2017 /*
2018 * Requests that should only happen on the EMT thread will be
2019 * wrapped in a setjmp so we can assert without causing trouble.
2020 */
2021 if ( pVM != NULL
2022 && pGVM != NULL
2023 && idCpu < pGVM->cCpus
2024 && pVM->pVMR0 != NULL)
2025 {
2026 switch (enmOperation)
2027 {
2028 /* These might/will be called before VMMR3Init. */
2029 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2030 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2031 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2032 case VMMR0_DO_GMM_FREE_PAGES:
2033 case VMMR0_DO_GMM_BALLOONED_PAGES:
2034 /* On the Mac we might not have a valid jmp buf, so check these as well. */
2035 case VMMR0_DO_VMMR0_INIT:
2036 case VMMR0_DO_VMMR0_TERM:
2037 {
2038 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2039 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2040 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2041 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2042 && pVCpu->hNativeThreadR0 == hNativeThread))
2043 {
2044 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2045 break;
2046
2047 /** @todo validate this EMT claim... GVM knows. */
2048 VMMR0ENTRYEXARGS Args;
2049 Args.pGVM = pGVM;
2050 Args.pVM = pVM;
2051 Args.idCpu = idCpu;
2052 Args.enmOperation = enmOperation;
2053 Args.pReq = pReq;
2054 Args.u64Arg = u64Arg;
2055 Args.pSession = pSession;
2056 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2057 }
2058 return VERR_VM_THREAD_NOT_EMT;
2059 }
2060
2061 default:
2062 break;
2063 }
2064 }
2065 return vmmR0EntryExWorker(pGVM, pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2066}
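/*
 * Illustrative only: ring-3 reaches this entry point through the support
 * library. A minimal sketch, assuming an already initialized
 * GMMBALLOONEDPAGESREQ request (the exact ioctl plumbing lives in SUPLib
 * and may differ in detail):
 *
 * @code
 *      GMMBALLOONEDPAGESREQ Req;
 *      Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *      Req.Hdr.cbReq    = sizeof(Req);
 *      // ... fill in the ballooning details ...
 *      int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, idCpu, VMMR0_DO_GMM_BALLOONED_PAGES,
 *                                0, &Req.Hdr); // u64Arg must be zero for this op
 * @endcode
 */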
2067
2068
2069/**
2070 * Checks whether we've armed the ring-0 long jump machinery.
2071 *
2072 * @returns @c true / @c false
2073 * @param pVCpu The cross context virtual CPU structure.
2074 * @thread EMT
2075 * @sa VMMIsLongJumpArmed
2076 */
2077VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
2078{
2079#ifdef RT_ARCH_X86
2080 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2081 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2082#else
2083 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2084 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2085#endif
2086}
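/*
 * A hedged usage sketch: callers typically assert this before an operation
 * that may need to take the ring-3 round trip, e.g.:
 *
 * @code
 *      Assert(VMMR0IsLongJumpArmed(pVCpu));
 *      rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
 * @endcode
 */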
2087
2088
2089/**
2090 * Checks whether we've done a ring-3 long jump.
2091 *
2092 * @returns @c true / @c false
2093 * @param pVCpu The cross context virtual CPU structure.
2094 * @thread EMT
2095 */
2096VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
2097{
2098 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2099}
2100
2101
2102/**
2103 * Internal R0 logger worker: Flush logger.
2104 *
2105 * @param pLogger The logger instance to flush.
2106 * @remark This function must be exported!
2107 */
2108VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2109{
2110#ifdef LOG_ENABLED
2111 /*
2112 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2113 * (This code is a bit paranoid.)
2114 */
2115 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2116 if ( !VALID_PTR(pR0Logger)
2117 || !VALID_PTR(pR0Logger + 1)
2118 || pLogger->u32Magic != RTLOGGER_MAGIC)
2119 {
2120# ifdef DEBUG
2121 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2122# endif
2123 return;
2124 }
2125 if (pR0Logger->fFlushingDisabled)
2126 return; /* quietly */
2127
2128 PVM pVM = pR0Logger->pVM;
2129 if ( !VALID_PTR(pVM)
2130 || pVM->pVMR0 != pVM)
2131 {
2132# ifdef DEBUG
2133 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
2134# endif
2135 return;
2136 }
2137
2138 PVMCPU pVCpu = VMMGetCpu(pVM);
2139 if (pVCpu)
2140 {
2141 /*
2142 * Check that the jump buffer is armed.
2143 */
2144# ifdef RT_ARCH_X86
2145 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2146 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2147# else
2148 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2149 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2150# endif
2151 {
2152# ifdef DEBUG
2153 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2154# endif
2155 return;
2156 }
2157 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2158 }
2159# ifdef DEBUG
2160 else
2161 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2162# endif
2163#else
2164 NOREF(pLogger);
2165#endif /* LOG_ENABLED */
2166}
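/*
 * The pLogger-to-pR0Logger conversion above is the usual containing-record
 * idiom: the RTLOGGER instance is embedded in VMMR0LOGGER, so subtracting
 * the member offset recovers the outer structure. A minimal sketch of the
 * same idiom, using a hypothetical OUTER type:
 *
 * @code
 *      typedef struct OUTER { uint32_t uMagic; RTLOGGER Logger; } OUTER;
 *      OUTER *pOuter = (OUTER *)((uintptr_t)pLogger - RT_OFFSETOF(OUTER, Logger));
 * @endcode
 */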
2167
2168/**
2169 * Internal R0 logger worker: Custom prefix.
2170 *
2171 * @returns Number of chars written.
2172 *
2173 * @param pLogger The logger instance.
2174 * @param pchBuf The output buffer.
2175 * @param cchBuf The size of the buffer.
2176 * @param pvUser User argument (ignored).
2177 */
2178VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
2179{
2180 NOREF(pvUser);
2181#ifdef LOG_ENABLED
2182 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2183 if ( !VALID_PTR(pR0Logger)
2184 || !VALID_PTR(pR0Logger + 1)
2185 || pLogger->u32Magic != RTLOGGER_MAGIC
2186 || cchBuf < 2)
2187 return 0;
2188
2189 static const char s_szHex[17] = "0123456789abcdef";
2190 VMCPUID const idCpu = pR0Logger->idCpu;
2191 pchBuf[1] = s_szHex[ idCpu & 15];
2192 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
2193
2194 return 2;
2195#else
2196 NOREF(pLogger); NOREF(pchBuf); NOREF(cchBuf);
2197 return 0;
2198#endif
2199}
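/*
 * Example (assumed rendering): the two characters written above are the
 * virtual CPU id in hex, so with this prefix callback installed the log
 * lines of EMT 31 (idCpu 0x1f) would start with "1f".
 */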
2200
2201#ifdef LOG_ENABLED
2202
2203/**
2204 * Disables flushing of the ring-0 debug log.
2205 *
2206 * @param pVCpu The cross context virtual CPU structure.
2207 */
2208VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
2209{
2210 if (pVCpu->vmm.s.pR0LoggerR0)
2211 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2212}
2213
2214
2215/**
2216 * Enables flushing of the ring-0 debug log.
2217 *
2218 * @param pVCpu The cross context virtual CPU structure.
2219 */
2220VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
2221{
2222 if (pVCpu->vmm.s.pR0LoggerR0)
2223 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2224}
2225
2226
2227/**
2228 * Checks whether log flushing is disabled.
2229 * @returns true if flushing is disabled, false if it is enabled.
2230 * @param pVCpu The cross context virtual CPU structure.
2231 */
2232VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
2233{
2234 if (pVCpu->vmm.s.pR0LoggerR0)
2235 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2236 return true;
2237}
2238#endif /* LOG_ENABLED */
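/*
 * A hedged usage sketch: flushing is typically disabled across sections
 * where the ring-3 round trip performed by vmmR0LoggerFlush would be
 * unsafe, and re-enabled afterwards:
 *
 * @code
 *      VMMR0LogFlushDisable(pVCpu);
 *      // ... section where a longjmp to ring-3 must not happen ...
 *      VMMR0LogFlushEnable(pVCpu);
 * @endcode
 */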
2239
2240/**
2241 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2242 *
2243 * @returns true if the breakpoint should be hit, false if it should be ignored.
2244 */
2245DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2246{
2247#if 0
2248 return true;
2249#else
2250 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2251 if (pVM)
2252 {
2253 PVMCPU pVCpu = VMMGetCpu(pVM);
2254
2255 if (pVCpu)
2256 {
2257#ifdef RT_ARCH_X86
2258 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2259 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2260#else
2261 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2262 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2263#endif
2264 {
2265 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2266 return RT_FAILURE_NP(rc);
2267 }
2268 }
2269 }
2270#ifdef RT_OS_LINUX
2271 return true;
2272#else
2273 return false;
2274#endif
2275#endif
2276}
2277
2278
2279/**
2280 * Override this so we can push it up to ring-3.
2281 *
2282 * @param pszExpr Expression. Can be NULL.
2283 * @param uLine Location line number.
2284 * @param pszFile Location file name.
2285 * @param pszFunction Location function name.
2286 */
2287DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2288{
2289 /*
2290 * To the log.
2291 */
2292 LogAlways(("\n!!R0-Assertion Failed!!\n"
2293 "Expression: %s\n"
2294 "Location : %s(%d) %s\n",
2295 pszExpr, pszFile, uLine, pszFunction));
2296
2297 /*
2298 * To the global VMM buffer, truncating the expression to 3/4 of the buffer so the location info always fits.
2299 */
2300 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2301 if (pVM)
2302 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2303 "\n!!R0-Assertion Failed!!\n"
2304 "Expression: %.*s\n"
2305 "Location : %s(%d) %s\n",
2306 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2307 pszFile, uLine, pszFunction);
2308
2309 /*
2310 * Continue the normal way.
2311 */
2312 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2313}
2314
2315
2316/**
2317 * Callback for RTLogFormatV which writes to the ring-3 log port.
2318 * See PFNLOGOUTPUT() for details.
2319 */
2320static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2321{
2322 for (size_t i = 0; i < cbChars; i++)
2323 {
2324 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2325 }
2326
2327 NOREF(pv);
2328 return cbChars;
2329}
2330
2331
2332/**
2333 * Override this so we can push it up to ring-3.
2334 *
2335 * @param pszFormat The format string.
2336 * @param va Arguments.
2337 */
2338DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2339{
2340 va_list vaCopy;
2341
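    /* A va_list may only be traversed once, so each consumer below works on its own va_copy. */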
2342 /*
2343 * Push the message to the loggers.
2344 */
2345 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2346 if (pLog)
2347 {
2348 va_copy(vaCopy, va);
2349 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2350 va_end(vaCopy);
2351 }
2352 pLog = RTLogRelGetDefaultInstance();
2353 if (pLog)
2354 {
2355 va_copy(vaCopy, va);
2356 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2357 va_end(vaCopy);
2358 }
2359
2360 /*
2361 * Push it to the global VMM buffer.
2362 */
2363 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2364 if (pVM)
2365 {
2366 va_copy(vaCopy, va);
2367 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2368 va_end(vaCopy);
2369 }
2370
2371 /*
2372 * Continue the normal way.
2373 */
2374 RTAssertMsg2V(pszFormat, va);
2375}
2376