VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 47797

Last change on this file since 47797 was 47760, checked in by vboxsync, 11 years ago

VMM/HM: Preemption hooks. Some common structural changes and cleanup, and initial implementation
of VT-x/AMD-V specific hook functionality. Work in progress.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 59.2 KB
Line 
1/* $Id: VMMR0.cpp 47760 2013-08-15 12:57:02Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_VMM
22#include <VBox/vmm/vmm.h>
23#include <VBox/sup.h>
24#include <VBox/vmm/trpm.h>
25#include <VBox/vmm/cpum.h>
26#include <VBox/vmm/pdmapi.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/stam.h>
29#include <VBox/vmm/tm.h>
30#include "VMMInternal.h"
31#include <VBox/vmm/vm.h>
32#ifdef VBOX_WITH_PCI_PASSTHROUGH
33# include <VBox/vmm/pdmpci.h>
34#endif
35
36#include <VBox/vmm/gvmm.h>
37#include <VBox/vmm/gmm.h>
38#include <VBox/intnet.h>
39#include <VBox/vmm/hm.h>
40#include <VBox/param.h>
41#include <VBox/err.h>
42#include <VBox/version.h>
43#include <VBox/log.h>
44
45#include <iprt/asm-amd64-x86.h>
46#include <iprt/assert.h>
47#include <iprt/crc.h>
48#include <iprt/mp.h>
49#include <iprt/once.h>
50#include <iprt/stdarg.h>
51#include <iprt/string.h>
52#include <iprt/thread.h>
53#include <iprt/timer.h>
54
55#include "dtrace/VBoxVMM.h"
56
57
58#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with with VC7! */
59# pragma intrinsic(_AddressOfReturnAddress)
60#endif
61
62
63/*******************************************************************************
64* Internal Functions *
65*******************************************************************************/
66RT_C_DECLS_BEGIN
67#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
68extern uint64_t __udivdi3(uint64_t, uint64_t);
69extern uint64_t __umoddi3(uint64_t, uint64_t);
70#endif
71RT_C_DECLS_END
72
73
74/*******************************************************************************
75* Global Variables *
76*******************************************************************************/
/** Drag in necessary library bits.
 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us.
 * Referencing the functions in this table forces the linker to keep them in
 * the image so that dependent ring-0 modules can resolve them at load time.
 * The table itself is never read; it only exists for its references. */
PFNRT g_VMMGCDeps[] =
{
    (PFNRT)RTCrc32,
    (PFNRT)RTOnce,
#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
    /* 32-bit Solaris/FreeBSD kernels do not provide these compiler helpers
       for 64-bit division/modulo, so we must carry our own. */
    (PFNRT)__udivdi3,
    (PFNRT)__umoddi3,
#endif
    NULL
};
89
90#ifdef RT_OS_SOLARIS
91/* Dependency information for the native solaris loader. */
92extern "C" { char _depends_on[] = "vboxdrv"; }
93#endif
94
95
96
/**
 * Initialize the module.
 * This is called when we're first loaded.
 *
 * Initializes the global VMM components in a fixed order; on any failure the
 * components that were already initialized are torn down in reverse order
 * before returning the failure status.
 *
 * @returns 0 on success.
 * @returns VBox status on failure.
 * @param   hMod    Image handle for use in APIs.
 */
DECLEXPORT(int) ModuleInit(void *hMod)
{
#ifdef VBOX_WITH_DTRACE_R0
    /*
     * The first thing to do is register the static tracepoints.
     * (Deregistration is automatic.)
     */
    int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
    if (RT_FAILURE(rc2))
        return rc2;
#endif
    LogFlow(("ModuleInit:\n"));

    /*
     * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
     * Each step only runs if all the previous ones succeeded; the matching
     * Term calls below each 'else' form the unwind path for that level.
     */
    int rc = vmmInitFormatTypes();
    if (RT_SUCCESS(rc))
    {
        rc = GVMMR0Init();
        if (RT_SUCCESS(rc))
        {
            rc = GMMR0Init();
            if (RT_SUCCESS(rc))
            {
                rc = HMR0Init();
                if (RT_SUCCESS(rc))
                {
                    rc = PGMRegisterStringFormatTypes();
                    if (RT_SUCCESS(rc))
                    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                        rc = PGMR0DynMapInit();
#endif
                        /* Note: without VBOX_WITH_2X_4GB_ADDR_SPACE, rc still
                           holds the previous success status here. */
                        if (RT_SUCCESS(rc))
                        {
                            rc = IntNetR0Init();
                            if (RT_SUCCESS(rc))
                            {
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                                rc = PciRawR0Init();
#endif
                                if (RT_SUCCESS(rc))
                                {
                                    rc = CPUMR0ModuleInit();
                                    if (RT_SUCCESS(rc))
                                    {
#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
                                        rc = vmmR0TripleFaultHackInit();
                                        if (RT_SUCCESS(rc))
#endif
                                        {
                                            /* Everything is up; success exit. */
                                            LogFlow(("ModuleInit: returns success.\n"));
                                            return VINF_SUCCESS;
                                        }

                                        /*
                                         * Bail out.
                                         */
#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
                                        vmmR0TripleFaultHackTerm();
#endif
                                    }
                                    else
                                        LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                                    PciRawR0Term();
#endif
                                }
                                else
                                    LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
                                IntNetR0Term();
                            }
                            else
                                LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                            PGMR0DynMapTerm();
#endif
                        }
                        else
                            LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
                        PGMDeregisterStringFormatTypes();
                    }
                    else
                        LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
                    HMR0Term();
                }
                else
                    LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
                GMMR0Term();
            }
            else
                LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
            GVMMR0Term();
        }
        else
            LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
        vmmTermFormatTypes();
    }
    else
        LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));

    LogFlow(("ModuleInit: failed %Rrc\n", rc));
    return rc;
}
210
211
/**
 * Terminate the module.
 * This is called when we're finally unloaded.
 *
 * Tears down the global components set up by ModuleInit, largely in the
 * reverse order of their initialization.
 *
 * @param   hMod    Image handle for use in APIs. (Not used by this body;
 *                  DTrace tracepoint deregistration is automatic.)
 */
DECLEXPORT(void) ModuleTerm(void *hMod)
{
    LogFlow(("ModuleTerm:\n"));

    /*
     * Terminate the CPUM module (Local APIC cleanup).
     */
    CPUMR0ModuleTerm();

    /*
     * Terminate the internal network service.
     */
    IntNetR0Term();

    /*
     * PGM (Darwin), HM and PciRaw global cleanup.
     */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    PGMR0DynMapTerm();
#endif
#ifdef VBOX_WITH_PCI_PASSTHROUGH
    PciRawR0Term();
#endif
    PGMDeregisterStringFormatTypes();
    HMR0Term();
#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
    vmmR0TripleFaultHackTerm();
#endif

    /*
     * Destroy the GMM and GVMM instances.
     */
    GMMR0Term();
    GVMMR0Term();

    vmmTermFormatTypes();

    LogFlow(("ModuleTerm: returns\n"));
}
257
258
/**
 * Initiates the R0 driver for a particular VM instance.
 *
 * Verifies that the ring-3 and ring-0 parts were built from the same sources,
 * registers the EMT-0 ring-0 logger, then performs the per-VM initialization
 * of GVMM, HM, CPUM and (Darwin only) PGM. On failure the components that
 * were initialized are torn down and the thread logger is deregistered.
 *
 * @returns VBox status code.
 *
 * @param   pVM         Pointer to the VM.
 * @param   uSvnRev     The SVN revision of the ring-3 part.
 * @param   uBuildType  Build type indicator.
 * @thread  EMT.
 */
static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
{
    /*
     * Match the SVN revisions and build type.
     */
    if (uSvnRev != VMMGetSvnRev())
    {
        LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
        SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
        return VERR_VMM_R0_VERSION_MISMATCH;
    }
    if (uBuildType != vmmGetBuildType())
    {
        LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
        SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
        return VERR_VMM_R0_VERSION_MISMATCH;
    }
    /* The VM structure must be page aligned and self-referencing via pVMR0. */
    if (   !VALID_PTR(pVM)
        || pVM->pVMR0 != pVM)
        return VERR_INVALID_PARAMETER;


#ifdef LOG_ENABLED
    /*
     * Register the EMT R0 logger instance for VCPU 0.
     */
    PVMCPU pVCpu = &pVM->aCpus[0];

    PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
    if (pR0Logger)
    {
# if 0 /* testing of the logger. */
        LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
        LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
        LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
        LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));

        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
        pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
        LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));

        RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
        LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
        LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
# endif
        Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        pR0Logger->fRegistered = true;
    }
#endif /* LOG_ENABLED */

    /*
     * Check if the host supports high resolution timers or not.
     */
    if (   pVM->vmm.s.fUsePeriodicPreemptionTimers
        && !RTTimerCanDoHighResolution())
        pVM->vmm.s.fUsePeriodicPreemptionTimers = false;

    /*
     * Initialize the per VM data for GVMM and GMM.
     */
    int rc = GVMMR0InitVM(pVM);
//    if (RT_SUCCESS(rc))
//        rc = GMMR0InitPerVMData(pVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Init HM, CPUM and PGM (Darwin only).
         */
        rc = HMR0InitVM(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = CPUMR0Init(pVM); /** @todo rename to CPUMR0InitVM */
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                rc = PGMR0DynMapInitVM(pVM);
#endif
                if (RT_SUCCESS(rc))
                {
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                    rc = PciRawR0InitVM(pVM);
#endif
                    if (RT_SUCCESS(rc))
                    {
                        /* Tell GVMM that per-VM init completed successfully. */
                        GVMMR0DoneInitVM(pVM);
                        return rc;
                    }
                }

                /* bail out */
            }
#ifdef VBOX_WITH_PCI_PASSTHROUGH
            PciRawR0TermVM(pVM);
#endif
            HMR0TermVM(pVM);
        }
    }


    /* Failure: drop the per-thread logger registration made above. */
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return rc;
}
388
389
/**
 * Terminates the R0 bits for a particular VM instance.
 *
 * This is normally called by ring-3 as part of the VM termination process, but
 * may alternatively be called during the support driver session cleanup when
 * the VM object is destroyed (see GVMM).
 *
 * @returns VBox status code.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pGVM    Pointer to the global VM structure. Optional.
 * @thread  EMT or session clean up thread.
 */
VMMR0DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
{
#ifdef VBOX_WITH_PCI_PASSTHROUGH
    PciRawR0TermVM(pVM);
#endif


    /*
     * Tell GVMM what we're up to and check that we only do this once.
     * The actual per-VM teardown only runs on the first call.
     */
    if (GVMMR0DoingTermVM(pVM, pGVM))
    {
        /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
         *        here to make sure we don't leak any shared pages if we crash... */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        PGMR0DynMapTermVM(pVM);
#endif
        HMR0TermVM(pVM);
    }

    /*
     * Deregister the logger.
     */
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return VINF_SUCCESS;
}
429
430
431/**
432 * Creates R0 thread-context hooks for the current EMT thread.
433 *
434 * @returns VBox status code.
435 *
436 * @param pVCpu Pointer to the VMCPU.
437 * @thread EMT.
438 */
439VMMR0DECL(int) VMMR0ThreadCtxHooksCreate(PVMCPU pVCpu)
440{
441 VMCPU_ASSERT_EMT(pVCpu);
442 Assert(pVCpu->vmm.s.hR0ThreadCtx == NIL_RTTHREADCTX);
443 int rc = RTThreadCtxHooksCreate(&pVCpu->vmm.s.hR0ThreadCtx);
444 if ( RT_SUCCESS(rc)
445 || rc == VERR_NOT_SUPPORTED)
446 {
447 return VINF_SUCCESS;
448 }
449
450 Log(("RTThreadCtxHooksCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
451 return rc;
452}
453
454
455/**
456 * Releases the object reference for the thread-context hook.
457 *
458 * @param pVCpu Pointer to the VMCPU.
459 * @remarks Can be called from any thread.
460 */
461VMMR0DECL(void) VMMR0ThreadCtxHooksRelease(PVMCPU pVCpu)
462{
463 RTThreadCtxHooksRelease(pVCpu->vmm.s.hR0ThreadCtx);
464}
465
466
467/**
468 * Registers the thread-context hook for this VCPU.
469 *
470 * @param pVCpu Pointer to the VMCPU.
471 * @param pfnThreadHook Pointer to the thread-context callback.
472 * @returns VBox status code.
473 *
474 * @thread EMT.
475 */
476VMMR0DECL(int) VMMR0ThreadCtxHooksRegister(PVMCPU pVCpu, PFNRTTHREADCTXHOOK pfnThreadHook)
477{
478 return RTThreadCtxHooksRegister(pVCpu->vmm.s.hR0ThreadCtx, pfnThreadHook, pVCpu);
479}
480
481
482/**
483 * Deregisters the thread-context hook for this VCPU.
484 *
485 * @returns VBox status code.
486 * @param pVCpu Pointer to the VMCPU.
487 * @thread EMT.
488 */
489VMMR0DECL(int) VMMR0ThreadCtxHooksDeregister(PVMCPU pVCpu)
490{
491 return RTThreadCtxHooksDeregister(pVCpu->vmm.s.hR0ThreadCtx);
492}
493
494
495/**
496 * Whether thread-context hooks are created (implying they're supported) on this
497 * platform.
498 *
499 * @returns true if the hooks are created, false otherwise.
500 * @param pVCpu Pointer to the VMCPU.
501 *
502 * @remarks Can be called from any thread.
503 */
504VMMR0DECL(bool) VMMR0ThreadCtxHooksAreCreated(PVMCPU pVCpu)
505{
506 return pVCpu->vmm.s.hR0ThreadCtx != NIL_RTTHREADCTX;
507}
508
509
510/**
511 * Whether thread-context hooks are registered for this VCPU.
512 *
513 * @returns true if registered, false otherwise.
514 * @param pVCpu Pointer to the VMCPU.
515 */
516VMMR0DECL(bool) VMMR0ThreadCtxHooksAreRegistered(PVMCPU pVCpu)
517{
518 return RTThreadCtxHooksAreRegistered(pVCpu->vmm.s.hR0ThreadCtx);
519}
520
521
522#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics.
 *
 * Maps the given status code onto the matching STAM counter in pVM->vmm.s and
 * increments it; anything without a dedicated counter lands in StatRZRetMisc.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   rc      The status code.
 */
static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
            break;
        case VINF_IOM_R3_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
            break;
        case VINF_IOM_R3_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
            break;
        case VINF_IOM_R3_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
            break;
        case VINF_IOM_R3_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
            break;
        case VINF_IOM_R3_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
            break;
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
            break;
        case VINF_EM_RAW_EMULATE_IO_BLOCK:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPDFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            /* Attribute the ring-3 return to the first pending force-flag
               found; the order below determines the attribution priority. */
            if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
            else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
            else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
            else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
            else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
            else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
            else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
            else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
            else
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
            break;

        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            /* Break down ring-3 calls by the requested operation. */
            switch (pVCpu->vmm.s.enmCallRing3Operation)
            {
                case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
                    break;
                case VMMCALLRING3_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
                    break;
                case VMMCALLRING3_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
                    break;
                case VMMCALLRING3_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
                    break;
                case VMMCALLRING3_PGM_MAP_CHUNK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
                    break;
                case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
                    break;
                case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
                    break;
                case VMMCALLRING3_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
                    break;
                case VMMCALLRING3_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
                    break;
                case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
                    break;
                case VMMCALLRING3_VM_R0_ASSERTION:
                default:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
            break;
        case VINF_PGM_POOL_FLUSH_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
            break;
        case VINF_EM_HM_PATCH_TPR_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
            break;
    }
}
712#endif /* VBOX_WITH_STATISTICS */
713
714
715/**
716 * Unused ring-0 entry point that used to be called from the interrupt gate.
717 *
718 * Will be removed one of the next times we do a major SUPDrv version bump.
719 *
720 * @returns VBox status code.
721 * @param pVM Pointer to the VM.
722 * @param enmOperation Which operation to execute.
723 * @param pvArg Argument to the operation.
724 * @remarks Assume called with interrupts disabled.
725 */
726VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
727{
728 /*
729 * We're returning VERR_NOT_SUPPORT here so we've got something else
730 * than -1 which the interrupt gate glue code might return.
731 */
732 Log(("operation %#x is not supported\n", enmOperation));
733 NOREF(enmOperation); NOREF(pvArg); NOREF(pVM);
734 return VERR_NOT_SUPPORTED;
735}
736
737
/**
 * The Ring 0 entry point, called by the fast-ioctl path.
 *
 * The return code is not returned directly; it is stored in
 * pVM->aCpus[idCpu].vmm.s.iLastGZRc for ring-3 to pick up.
 *
 * @param   pVM             Pointer to the VM.
 *                          The return code is stored in pVM->vmm.s.iLastGZRc.
 * @param   idCpu           The Virtual CPU ID of the calling EMT.
 * @param   enmOperation    Which operation to execute.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
{
    /* Silently refuse out-of-range CPU ids; there is no VCPU to store an rc in. */
    if (RT_UNLIKELY(idCpu >= pVM->cCpus))
        return;
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    switch (enmOperation)
    {
        /*
         * Switch to GC and run guest raw mode code.
         * Disable interrupts before doing the world switch.
         */
        case VMMR0_DO_RAW_RUN:
        {
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            /* Some safety precautions first. */
            if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
            {
                pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
                break;
            }
#endif

            /* Disable preemption and update the periodic preemption timer. */
            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&PreemptState);
            RTCPUID idHostCpu = RTMpCpuId();
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
            CPUMR0SetLApic(pVM, idHostCpu);
#endif
            ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
            if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
                GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));

            /* We might need to disable VT-x if the active switcher turns off paging. */
            bool fVTxDisabled;
            int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
            if (RT_SUCCESS(rc))
            {
                RTCCUINTREG uFlags = ASMIntDisableFlags();

                /* Loop so that tracer probe requests can re-enter the guest
                   immediately after being serviced. */
                for (;;)
                {
                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
                    TMNotifyStartOfExecution(pVCpu);

                    rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
                    pVCpu->vmm.s.iLastGZRc = rc;

                    TMNotifyEndOfExecution(pVCpu);
                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);

                    if (rc != VINF_VMM_CALL_TRACER)
                        break;
                    SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
                }

                /* Re-enable VT-x if previously turned off. */
                HMR0LeaveSwitcher(pVM, fVTxDisabled);

                /* Forward host interrupts taken while in the guest before
                   re-enabling interrupts on this CPU. */
                if (    rc == VINF_EM_RAW_INTERRUPT
                    ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
                    TRPMR0DispatchHostInterrupt(pVM);

                ASMSetFlags(uFlags);

#ifdef VBOX_WITH_STATISTICS
                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
                vmmR0RecordRC(pVM, pVCpu, rc);
#endif
            }
            else
                pVCpu->vmm.s.iLastGZRc = rc;
            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
            RTThreadPreemptRestore(&PreemptState);
            break;
        }

        /*
         * Run guest code using the available hardware acceleration technology.
         *
         * Disable interrupts before we do anything interesting. On Windows we avoid
         * this by having the support driver raise the IRQL before calling us, this way
         * we hope to get away with page faults and later calling into the kernel.
         */
        case VMMR0_DO_HM_RUN:
        {
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            RTCCUINTREG uFlags = ASMIntDisableFlags();
#endif
            ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());
            if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
                GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));

#ifdef LOG_ENABLED
            /* VCPU 0's logger was registered in vmmR0InitVM; the other VCPUs
               register theirs lazily on their first run here. */
            if (pVCpu->idCpu > 0)
            {
                /* Lazy registration of ring 0 loggers. */
                PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
                if (   pR0Logger
                    && !pR0Logger->fRegistered)
                {
                    RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
                    pR0Logger->fRegistered = true;
                }
            }
#endif
            int rc;
            if (!HMR0SuspendPending())
            {
                /** @todo VMMR0ThreadCtxHooks support. */
                rc = HMR0Enter(pVM, pVCpu);
                if (RT_SUCCESS(rc))
                {
                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);

                    rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
                    int rc2 = HMR0Leave(pVM, pVCpu);
                    AssertRC(rc2);

                    if (RT_UNLIKELY(   VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
                                    && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
                    {
                        /* Manual assert as normal assertions are going to crash in this case. */
                        pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
                        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
                                    "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
                        rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
                    }
                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
                }
                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
            }
            else
            {
                /* System is about to go into suspend mode; go back to ring 3. */
                rc = VINF_EM_RAW_INTERRUPT;
            }
            pVCpu->vmm.s.iLastGZRc = rc;

            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTThreadPreemptRestore(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            ASMSetFlags(uFlags);
#endif

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, pVCpu, rc);
#endif
            /* No special action required for external interrupts, just return. */
            break;
        }

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
            break;

        /*
         * Impossible.
         */
        default:
            AssertMsgFailed(("%#x\n", enmOperation));
            pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            break;
    }
}
920
921
922/**
923 * Validates a session or VM session argument.
924 *
925 * @returns true / false accordingly.
926 * @param pVM Pointer to the VM.
927 * @param pSession The session argument.
928 */
929DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
930{
931 /* This must be set! */
932 if (!pSession)
933 return false;
934
935 /* Only one out of the two. */
936 if (pVM && pClaimedSession)
937 return false;
938 if (pVM)
939 pClaimedSession = pVM->pSession;
940 return pClaimedSession == pSession;
941}
942
943
944/**
945 * VMMR0EntryEx worker function, either called directly or when ever possible
946 * called thru a longjmp so we can exit safely on failure.
947 *
948 * @returns VBox status code.
949 * @param pVM Pointer to the VM.
950 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
951 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
952 * @param enmOperation Which operation to execute.
953 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
954 * The support driver validates this if it's present.
955 * @param u64Arg Some simple constant argument.
956 * @param pSession The session of the caller.
957 * @remarks Assume called with interrupts _enabled_.
958 */
959static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
960{
961 /*
962 * Common VM pointer validation.
963 */
964 if (pVM)
965 {
966 if (RT_UNLIKELY( !VALID_PTR(pVM)
967 || ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
968 {
969 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
970 return VERR_INVALID_POINTER;
971 }
972 if (RT_UNLIKELY( pVM->enmVMState < VMSTATE_CREATING
973 || pVM->enmVMState > VMSTATE_TERMINATED
974 || pVM->pVMR0 != pVM))
975 {
976 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
977 pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
978 return VERR_INVALID_POINTER;
979 }
980
981 if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
982 {
983 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
984 return VERR_INVALID_PARAMETER;
985 }
986 }
987 else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
988 {
989 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
990 return VERR_INVALID_PARAMETER;
991 }
992
993
994 switch (enmOperation)
995 {
996 /*
997 * GVM requests
998 */
999 case VMMR0_DO_GVMM_CREATE_VM:
1000 if (pVM || u64Arg || idCpu != NIL_VMCPUID)
1001 return VERR_INVALID_PARAMETER;
1002 return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);
1003
1004 case VMMR0_DO_GVMM_DESTROY_VM:
1005 if (pReqHdr || u64Arg)
1006 return VERR_INVALID_PARAMETER;
1007 return GVMMR0DestroyVM(pVM);
1008
1009 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1010 {
1011 if (!pVM)
1012 return VERR_INVALID_PARAMETER;
1013 return GVMMR0RegisterVCpu(pVM, idCpu);
1014 }
1015
1016 case VMMR0_DO_GVMM_SCHED_HALT:
1017 if (pReqHdr)
1018 return VERR_INVALID_PARAMETER;
1019 return GVMMR0SchedHalt(pVM, idCpu, u64Arg);
1020
1021 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1022 if (pReqHdr || u64Arg)
1023 return VERR_INVALID_PARAMETER;
1024 return GVMMR0SchedWakeUp(pVM, idCpu);
1025
1026 case VMMR0_DO_GVMM_SCHED_POKE:
1027 if (pReqHdr || u64Arg)
1028 return VERR_INVALID_PARAMETER;
1029 return GVMMR0SchedPoke(pVM, idCpu);
1030
1031 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1032 if (u64Arg)
1033 return VERR_INVALID_PARAMETER;
1034 return GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1035
1036 case VMMR0_DO_GVMM_SCHED_POLL:
1037 if (pReqHdr || u64Arg > 1)
1038 return VERR_INVALID_PARAMETER;
1039 return GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);
1040
1041 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1042 if (u64Arg)
1043 return VERR_INVALID_PARAMETER;
1044 return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);
1045
1046 case VMMR0_DO_GVMM_RESET_STATISTICS:
1047 if (u64Arg)
1048 return VERR_INVALID_PARAMETER;
1049 return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);
1050
1051 /*
1052 * Initialize the R0 part of a VM instance.
1053 */
1054 case VMMR0_DO_VMMR0_INIT:
1055 return vmmR0InitVM(pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1056
1057 /*
1058 * Terminate the R0 part of a VM instance.
1059 */
1060 case VMMR0_DO_VMMR0_TERM:
1061 return VMMR0TermVM(pVM, NULL);
1062
1063 /*
1064 * Attempt to enable hm mode and check the current setting.
1065 */
1066 case VMMR0_DO_HM_ENABLE:
1067 return HMR0EnableAllCpus(pVM);
1068
1069 /*
1070 * Setup the hardware accelerated session.
1071 */
1072 case VMMR0_DO_HM_SETUP_VM:
1073 return HMR0SetupVM(pVM);
1074
1075 /*
1076 * Switch to RC to execute Hypervisor function.
1077 */
1078 case VMMR0_DO_CALL_HYPERVISOR:
1079 {
1080 int rc;
1081 bool fVTxDisabled;
1082
1083#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1084 if (RT_UNLIKELY(!PGMGetHyperCR3(VMMGetCpu0(pVM))))
1085 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1086#endif
1087
1088 RTCCUINTREG fFlags = ASMIntDisableFlags();
1089
1090#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1091 RTCPUID idHostCpu = RTMpCpuId();
1092 CPUMR0SetLApic(pVM, idHostCpu);
1093#endif
1094
1095 /* We might need to disable VT-x if the active switcher turns off paging. */
1096 rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1097 if (RT_FAILURE(rc))
1098 return rc;
1099
1100 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1101
1102 /* Re-enable VT-x if previously turned off. */
1103 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1104
1105 /** @todo dispatch interrupts? */
1106 ASMSetFlags(fFlags);
1107 return rc;
1108 }
1109
1110 /*
1111 * PGM wrappers.
1112 */
1113 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1114 if (idCpu == NIL_VMCPUID)
1115 return VERR_INVALID_CPU_ID;
1116 return PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);
1117
1118 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1119 if (idCpu == NIL_VMCPUID)
1120 return VERR_INVALID_CPU_ID;
1121 return PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu]);
1122
1123 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1124 if (idCpu == NIL_VMCPUID)
1125 return VERR_INVALID_CPU_ID;
1126 return PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);
1127
1128 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1129 if (idCpu != 0)
1130 return VERR_INVALID_CPU_ID;
1131 return PGMR0PhysSetupIommu(pVM);
1132
1133 /*
1134 * GMM wrappers.
1135 */
1136 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1137 if (u64Arg)
1138 return VERR_INVALID_PARAMETER;
1139 return GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1140
1141 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1142 if (u64Arg)
1143 return VERR_INVALID_PARAMETER;
1144 return GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1145
1146 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1147 if (u64Arg)
1148 return VERR_INVALID_PARAMETER;
1149 return GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1150
1151 case VMMR0_DO_GMM_FREE_PAGES:
1152 if (u64Arg)
1153 return VERR_INVALID_PARAMETER;
1154 return GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1155
1156 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1157 if (u64Arg)
1158 return VERR_INVALID_PARAMETER;
1159 return GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1160
1161 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1162 if (u64Arg)
1163 return VERR_INVALID_PARAMETER;
1164 return GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);
1165
1166 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1167 if (idCpu == NIL_VMCPUID)
1168 return VERR_INVALID_CPU_ID;
1169 if (u64Arg)
1170 return VERR_INVALID_PARAMETER;
1171 return GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1172
1173 case VMMR0_DO_GMM_BALLOONED_PAGES:
1174 if (u64Arg)
1175 return VERR_INVALID_PARAMETER;
1176 return GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1177
1178 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1179 if (u64Arg)
1180 return VERR_INVALID_PARAMETER;
1181 return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1182
1183 case VMMR0_DO_GMM_SEED_CHUNK:
1184 if (pReqHdr)
1185 return VERR_INVALID_PARAMETER;
1186 return GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);
1187
1188 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1189 if (idCpu == NIL_VMCPUID)
1190 return VERR_INVALID_CPU_ID;
1191 if (u64Arg)
1192 return VERR_INVALID_PARAMETER;
1193 return GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1194
1195 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1196 if (idCpu == NIL_VMCPUID)
1197 return VERR_INVALID_CPU_ID;
1198 if (u64Arg)
1199 return VERR_INVALID_PARAMETER;
1200 return GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1201
1202 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1203 if (idCpu == NIL_VMCPUID)
1204 return VERR_INVALID_CPU_ID;
1205 if ( u64Arg
1206 || pReqHdr)
1207 return VERR_INVALID_PARAMETER;
1208 return GMMR0ResetSharedModules(pVM, idCpu);
1209
1210#ifdef VBOX_WITH_PAGE_SHARING
1211 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1212 {
1213 if (idCpu == NIL_VMCPUID)
1214 return VERR_INVALID_CPU_ID;
1215 if ( u64Arg
1216 || pReqHdr)
1217 return VERR_INVALID_PARAMETER;
1218
1219 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1220 Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());
1221
1222# ifdef DEBUG_sandervl
1223 /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
1224 /* Todo: this can have bad side effects for unexpected jumps back to r3. */
1225 int rc = GMMR0CheckSharedModulesStart(pVM);
1226 if (rc == VINF_SUCCESS)
1227 {
1228 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
1229 Assert( rc == VINF_SUCCESS
1230 || (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
1231 GMMR0CheckSharedModulesEnd(pVM);
1232 }
1233# else
1234 int rc = GMMR0CheckSharedModules(pVM, pVCpu);
1235# endif
1236 return rc;
1237 }
1238#endif
1239
1240#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1241 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1242 if (u64Arg)
1243 return VERR_INVALID_PARAMETER;
1244 return GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1245#endif
1246
1247 case VMMR0_DO_GMM_QUERY_STATISTICS:
1248 if (u64Arg)
1249 return VERR_INVALID_PARAMETER;
1250 return GMMR0QueryStatisticsReq(pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1251
1252 case VMMR0_DO_GMM_RESET_STATISTICS:
1253 if (u64Arg)
1254 return VERR_INVALID_PARAMETER;
1255 return GMMR0ResetStatisticsReq(pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1256
1257 /*
1258 * A quick GCFGM mock-up.
1259 */
1260 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1261 case VMMR0_DO_GCFGM_SET_VALUE:
1262 case VMMR0_DO_GCFGM_QUERY_VALUE:
1263 {
1264 if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1265 return VERR_INVALID_PARAMETER;
1266 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1267 if (pReq->Hdr.cbReq != sizeof(*pReq))
1268 return VERR_INVALID_PARAMETER;
1269 int rc;
1270 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1271 {
1272 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1273 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1274 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1275 }
1276 else
1277 {
1278 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1279 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1280 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1281 }
1282 return rc;
1283 }
1284
1285 /*
1286 * PDM Wrappers.
1287 */
1288 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1289 {
1290 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1291 return VERR_INVALID_PARAMETER;
1292 return PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1293 }
1294
1295 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1296 {
1297 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1298 return VERR_INVALID_PARAMETER;
1299 return PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1300 }
1301
1302 /*
1303 * Requests to the internal networking service.
1304 */
1305 case VMMR0_DO_INTNET_OPEN:
1306 {
1307 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
1308 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
1309 return VERR_INVALID_PARAMETER;
1310 return IntNetR0OpenReq(pSession, pReq);
1311 }
1312
1313 case VMMR0_DO_INTNET_IF_CLOSE:
1314 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1315 return VERR_INVALID_PARAMETER;
1316 return IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
1317
1318 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
1319 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1320 return VERR_INVALID_PARAMETER;
1321 return IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
1322
1323 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
1324 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1325 return VERR_INVALID_PARAMETER;
1326 return IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
1327
1328 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
1329 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1330 return VERR_INVALID_PARAMETER;
1331 return IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
1332
1333 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
1334 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1335 return VERR_INVALID_PARAMETER;
1336 return IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
1337
1338 case VMMR0_DO_INTNET_IF_SEND:
1339 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1340 return VERR_INVALID_PARAMETER;
1341 return IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
1342
1343 case VMMR0_DO_INTNET_IF_WAIT:
1344 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1345 return VERR_INVALID_PARAMETER;
1346 return IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
1347
1348 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
1349 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1350 return VERR_INVALID_PARAMETER;
1351 return IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
1352
1353#ifdef VBOX_WITH_PCI_PASSTHROUGH
1354 /*
1355 * Requests to host PCI driver service.
1356 */
1357 case VMMR0_DO_PCIRAW_REQ:
1358 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1359 return VERR_INVALID_PARAMETER;
1360 return PciRawR0ProcessReq(pSession, pVM, (PPCIRAWSENDREQ)pReqHdr);
1361#endif
1362 /*
1363 * For profiling.
1364 */
1365 case VMMR0_DO_NOP:
1366 case VMMR0_DO_SLOW_NOP:
1367 return VINF_SUCCESS;
1368
1369 /*
1370 * For testing Ring-0 APIs invoked in this environment.
1371 */
1372 case VMMR0_DO_TESTS:
1373 /** @todo make new test */
1374 return VINF_SUCCESS;
1375
1376
1377#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1378 case VMMR0_DO_TEST_SWITCHER3264:
1379 if (idCpu == NIL_VMCPUID)
1380 return VERR_INVALID_CPU_ID;
1381 return HMR0TestSwitcher3264(pVM);
1382#endif
1383 default:
1384 /*
1385 * We're returning VERR_NOT_SUPPORT here so we've got something else
1386 * than -1 which the interrupt gate glue code might return.
1387 */
1388 Log(("operation %#x is not supported\n", enmOperation));
1389 return VERR_NOT_SUPPORTED;
1390 }
1391}
1392
1393
1394/**
1395 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
1396 */
1397typedef struct VMMR0ENTRYEXARGS
1398{
1399 PVM pVM;
1400 VMCPUID idCpu;
1401 VMMR0OPERATION enmOperation;
1402 PSUPVMMR0REQHDR pReq;
1403 uint64_t u64Arg;
1404 PSUPDRVSESSION pSession;
1405} VMMR0ENTRYEXARGS;
1406/** Pointer to a vmmR0EntryExWrapper argument package. */
1407typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
1408
1409/**
1410 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
1411 *
1412 * @returns VBox status code.
1413 * @param pvArgs The argument package
1414 */
1415static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
1416{
1417 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
1418 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
1419 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
1420 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
1421 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
1422 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
1423}
1424
1425
1426/**
1427 * The Ring 0 entry point, called by the support library (SUP).
1428 *
1429 * @returns VBox status code.
1430 * @param pVM Pointer to the VM.
1431 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1432 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
1433 * @param enmOperation Which operation to execute.
1434 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
1435 * @param u64Arg Some simple constant argument.
1436 * @param pSession The session of the caller.
1437 * @remarks Assume called with interrupts _enabled_.
1438 */
1439VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
1440{
1441 /*
1442 * Requests that should only happen on the EMT thread will be
1443 * wrapped in a setjmp so we can assert without causing trouble.
1444 */
1445 if ( VALID_PTR(pVM)
1446 && pVM->pVMR0
1447 && idCpu < pVM->cCpus)
1448 {
1449 switch (enmOperation)
1450 {
1451 /* These might/will be called before VMMR3Init. */
1452 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1453 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1454 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1455 case VMMR0_DO_GMM_FREE_PAGES:
1456 case VMMR0_DO_GMM_BALLOONED_PAGES:
1457 /* On the mac we might not have a valid jmp buf, so check these as well. */
1458 case VMMR0_DO_VMMR0_INIT:
1459 case VMMR0_DO_VMMR0_TERM:
1460 {
1461 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1462
1463 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
1464 break;
1465
1466 /** @todo validate this EMT claim... GVM knows. */
1467 VMMR0ENTRYEXARGS Args;
1468 Args.pVM = pVM;
1469 Args.idCpu = idCpu;
1470 Args.enmOperation = enmOperation;
1471 Args.pReq = pReq;
1472 Args.u64Arg = u64Arg;
1473 Args.pSession = pSession;
1474 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
1475 }
1476
1477 default:
1478 break;
1479 }
1480 }
1481 return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
1482}
1483
1484
1485/**
1486 * Checks whether we've armed the ring-0 long jump machinery.
1487 *
1488 * @returns @c true / @c false
1489 * @param pVCpu The caller's cross context virtual CPU structure.
1490 * @thread EMT
1491 * @sa VMMIsLongJumpArmed
1492 */
1493VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
1494{
1495#ifdef RT_ARCH_X86
1496 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
1497 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
1498#else
1499 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
1500 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
1501#endif
1502}
1503
1504
1505/**
1506 * Internal R0 logger worker: Flush logger.
1507 *
1508 * @param pLogger The logger instance to flush.
1509 * @remark This function must be exported!
1510 */
1511VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
1512{
1513#ifdef LOG_ENABLED
1514 /*
1515 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
1516 * (This is a bit paranoid code.)
1517 */
1518 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
1519 if ( !VALID_PTR(pR0Logger)
1520 || !VALID_PTR(pR0Logger + 1)
1521 || pLogger->u32Magic != RTLOGGER_MAGIC)
1522 {
1523# ifdef DEBUG
1524 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
1525# endif
1526 return;
1527 }
1528 if (pR0Logger->fFlushingDisabled)
1529 return; /* quietly */
1530
1531 PVM pVM = pR0Logger->pVM;
1532 if ( !VALID_PTR(pVM)
1533 || pVM->pVMR0 != pVM)
1534 {
1535# ifdef DEBUG
1536 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
1537# endif
1538 return;
1539 }
1540
1541 PVMCPU pVCpu = VMMGetCpu(pVM);
1542 if (pVCpu)
1543 {
1544 /*
1545 * Check that the jump buffer is armed.
1546 */
1547# ifdef RT_ARCH_X86
1548 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
1549 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
1550# else
1551 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
1552 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
1553# endif
1554 {
1555# ifdef DEBUG
1556 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
1557# endif
1558 return;
1559 }
1560 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
1561 }
1562# ifdef DEBUG
1563 else
1564 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
1565# endif
1566#endif
1567}
1568
1569/**
1570 * Internal R0 logger worker: Custom prefix.
1571 *
1572 * @returns Number of chars written.
1573 *
1574 * @param pLogger The logger instance.
1575 * @param pchBuf The output buffer.
1576 * @param cchBuf The size of the buffer.
1577 * @param pvUser User argument (ignored).
1578 */
1579VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
1580{
1581 NOREF(pvUser);
1582#ifdef LOG_ENABLED
1583 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
1584 if ( !VALID_PTR(pR0Logger)
1585 || !VALID_PTR(pR0Logger + 1)
1586 || pLogger->u32Magic != RTLOGGER_MAGIC
1587 || cchBuf < 2)
1588 return 0;
1589
1590 static const char s_szHex[17] = "0123456789abcdef";
1591 VMCPUID const idCpu = pR0Logger->idCpu;
1592 pchBuf[1] = s_szHex[ idCpu & 15];
1593 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
1594
1595 return 2;
1596#else
1597 return 0;
1598#endif
1599}
1600
1601#ifdef LOG_ENABLED
1602
1603/**
1604 * Disables flushing of the ring-0 debug log.
1605 *
1606 * @param pVCpu Pointer to the VMCPU.
1607 */
1608VMMR0DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
1609{
1610 if (pVCpu->vmm.s.pR0LoggerR0)
1611 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
1612}
1613
1614
1615/**
1616 * Enables flushing of the ring-0 debug log.
1617 *
1618 * @param pVCpu Pointer to the VMCPU.
1619 */
1620VMMR0DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
1621{
1622 if (pVCpu->vmm.s.pR0LoggerR0)
1623 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
1624}
1625
1626
1627/**
1628 * Checks if log flushing is disabled or not.
1629 *
1630 * @param pVCpu Pointer to the VMCPU.
1631 */
1632VMMR0DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
1633{
1634 if (pVCpu->vmm.s.pR0LoggerR0)
1635 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
1636 return true;
1637}
1638#endif /* LOG_ENABLED */
1639
1640/**
1641 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
1642 *
1643 * @returns true if the breakpoint should be hit, false if it should be ignored.
1644 */
1645DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
1646{
1647#if 0
1648 return true;
1649#else
1650 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
1651 if (pVM)
1652 {
1653 PVMCPU pVCpu = VMMGetCpu(pVM);
1654
1655 if (pVCpu)
1656 {
1657#ifdef RT_ARCH_X86
1658 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
1659 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
1660#else
1661 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
1662 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
1663#endif
1664 {
1665 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
1666 return RT_FAILURE_NP(rc);
1667 }
1668 }
1669 }
1670#ifdef RT_OS_LINUX
1671 return true;
1672#else
1673 return false;
1674#endif
1675#endif
1676}
1677
1678
1679/**
1680 * Override this so we can push it up to ring-3.
1681 *
1682 * @param pszExpr Expression. Can be NULL.
1683 * @param uLine Location line number.
1684 * @param pszFile Location file name.
1685 * @param pszFunction Location function name.
1686 */
1687DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
1688{
1689 /*
1690 * To the log.
1691 */
1692 LogAlways(("\n!!R0-Assertion Failed!!\n"
1693 "Expression: %s\n"
1694 "Location : %s(%d) %s\n",
1695 pszExpr, pszFile, uLine, pszFunction));
1696
1697 /*
1698 * To the global VMM buffer.
1699 */
1700 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
1701 if (pVM)
1702 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
1703 "\n!!R0-Assertion Failed!!\n"
1704 "Expression: %s\n"
1705 "Location : %s(%d) %s\n",
1706 pszExpr, pszFile, uLine, pszFunction);
1707
1708 /*
1709 * Continue the normal way.
1710 */
1711 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
1712}
1713
1714
1715/**
1716 * Callback for RTLogFormatV which writes to the ring-3 log port.
1717 * See PFNLOGOUTPUT() for details.
1718 */
1719static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
1720{
1721 for (size_t i = 0; i < cbChars; i++)
1722 LogAlways(("%c", pachChars[i]));
1723
1724 NOREF(pv);
1725 return cbChars;
1726}
1727
1728
1729/**
1730 * Override this so we can push it up to ring-3.
1731 *
1732 * @param pszFormat The format string.
1733 * @param va Arguments.
1734 */
1735DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
1736{
1737 va_list vaCopy;
1738
1739 /*
1740 * Push the message to the loggers.
1741 */
1742 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
1743 if (pLog)
1744 {
1745 va_copy(vaCopy, va);
1746 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
1747 va_end(vaCopy);
1748 }
1749 pLog = RTLogRelDefaultInstance();
1750 if (pLog)
1751 {
1752 va_copy(vaCopy, va);
1753 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
1754 va_end(vaCopy);
1755 }
1756
1757 /*
1758 * Push it to the global VMM buffer.
1759 */
1760 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
1761 if (pVM)
1762 {
1763 va_copy(vaCopy, va);
1764 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
1765 va_end(vaCopy);
1766 }
1767
1768 /*
1769 * Continue the normal way.
1770 */
1771 RTAssertMsg2V(pszFormat, va);
1772}
1773
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette